/* $Id: alloc-r0drv.cpp $ */
/** @file
 * IPRT - Memory Allocation, Ring-0 Driver.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define RTMEM_NO_WRAP_TO_EF_APIS
#include <iprt/mem.h>
#include "internal/iprt.h"

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#ifdef RT_MORE_STRICT
# include <iprt/mp.h>
#endif
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "r0drv/alloc-r0drv.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef RT_STRICT
# define RTR0MEM_STRICT
#endif

#ifdef RTR0MEM_STRICT
# define RTR0MEM_FENCE_EXTRA    16
#else
# define RTR0MEM_FENCE_EXTRA    0
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifdef RTR0MEM_STRICT
/** Fence data. */
static uint8_t const g_abFence[RTR0MEM_FENCE_EXTRA] =
{
    0x77, 0x88, 0x66, 0x99, 0x55, 0xaa, 0x44, 0xbb,
    0x33, 0xcc, 0x22, 0xdd, 0x11, 0xee, 0x00, 0xff
};
#endif
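
/*
 * Reading aid (illustrative sketch, not a declaration used by the code): with
 * RTR0MEM_STRICT defined, the allocators below lay every block out as
 *
 *      [RTMEMHDR][user data, pHdr->cbReq bytes][RTR0MEM_FENCE_EXTRA bytes of g_abFence]
 *
 * so that, given a valid header pointer pHdr, the free paths can verify the
 * trailing fence roughly like this (pbFence is just a throwaway local here):
 *
 *      uint8_t const *pbFence = (uint8_t const *)(pHdr + 1) + pHdr->cbReq;
 *      AssertRelease(!memcmp(pbFence, &g_abFence[0], RTR0MEM_FENCE_EXTRA));
 */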


/**
 * Wrapper around rtR0MemAllocEx.
 *
 * @returns Pointer to the allocated memory block header.
 * @param   cb      The number of bytes to allocate (sans header).
 * @param   fFlags  The allocation flags.
 */
DECLINLINE(PRTMEMHDR) rtR0MemAlloc(size_t cb, uint32_t fFlags)
{
    PRTMEMHDR pHdr;
    int rc = rtR0MemAllocEx(cb, fFlags, &pHdr);
    if (RT_FAILURE(rc))
        return NULL;
    return pHdr;
}


RTDECL(void *) RTMemTmpAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return RTMemAllocTag(cb, pszTag);
}
RT_EXPORT_SYMBOL(RTMemTmpAllocTag);


RTDECL(void *) RTMemTmpAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return RTMemAllocZTag(cb, pszTag);
}
RT_EXPORT_SYMBOL(RTMemTmpAllocZTag);


RTDECL(void) RTMemTmpFree(void *pv) RT_NO_THROW_DEF
{
    return RTMemFree(pv);
}
RT_EXPORT_SYMBOL(RTMemTmpFree);





RTDECL(void *) RTMemAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();
    RT_NOREF_PV(pszTag);

    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, 0);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
        return pHdr + 1;
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemAllocTag);


RTDECL(void *) RTMemAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();
    RT_NOREF_PV(pszTag);

    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_ZEROED);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
        return memset(pHdr + 1, 0, cb);
#else
        return memset(pHdr + 1, 0, pHdr->cb);
#endif
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemAllocZTag);
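
/*
 * Illustrative usage sketch (documentation aid only, not code used by this
 * file): ring-0 callers typically go through the tag-less RTMemAlloc /
 * RTMemAllocZ wrappers from iprt/mem.h, which expand to the *Tag functions
 * above.  MYSTATE is a hypothetical caller structure.
 *
 *      MYSTATE *pState = (MYSTATE *)RTMemAllocZ(sizeof(*pState));
 *      if (!pState)
 *          return VERR_NO_MEMORY;
 *      ...
 *      RTMemFree(pState);
 */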


RTDECL(void *) RTMemAllocVarTag(size_t cbUnaligned, const char *pszTag)
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return RTMemAllocTag(cbAligned, pszTag);
}
RT_EXPORT_SYMBOL(RTMemAllocVarTag);


RTDECL(void *) RTMemAllocZVarTag(size_t cbUnaligned, const char *pszTag)
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return RTMemAllocZTag(cbAligned, pszTag);
}
RT_EXPORT_SYMBOL(RTMemAllocZVarTag);
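
/*
 * Worked example of the sizing rule above: a 21 byte request is padded to
 * RT_ALIGN_Z(21, 16) = 32 bytes, while a 5 byte request on a 64-bit host is
 * padded to RT_ALIGN_Z(5, sizeof(void *)) = 8 bytes before being passed on.
 */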


RTDECL(void *) RTMemReallocTag(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdrOld;

    /* Free. */
    if (!cbNew && pvOld)
    {
        RTMemFree(pvOld);
        return NULL;
    }

    /* Alloc. */
    if (!pvOld)
        return RTMemAllocTag(cbNew, pszTag);

    /*
     * Realloc.
     */
    pHdrOld = (PRTMEMHDR)pvOld - 1;
    RT_ASSERT_PREEMPTIBLE();

    if (pHdrOld->u32Magic == RTMEMHDR_MAGIC)
    {
        PRTMEMHDR pHdrNew;

        /* If there is sufficient space in the old block and we don't cause
           substantial internal fragmentation, reuse the old block. */
        if (   pHdrOld->cb >= cbNew + RTR0MEM_FENCE_EXTRA
            && pHdrOld->cb - (cbNew + RTR0MEM_FENCE_EXTRA) <= 128)
        {
            pHdrOld->cbReq = (uint32_t)cbNew; Assert(pHdrOld->cbReq == cbNew);
#ifdef RTR0MEM_STRICT
            memcpy((uint8_t *)(pHdrOld + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
            return pvOld;
        }

        /* Allocate a new block and copy over the content. */
        pHdrNew = rtR0MemAlloc(cbNew + RTR0MEM_FENCE_EXTRA, 0);
        if (pHdrNew)
        {
            size_t cbCopy = RT_MIN(pHdrOld->cb, pHdrNew->cb);
            memcpy(pHdrNew + 1, pvOld, cbCopy);
#ifdef RTR0MEM_STRICT
            pHdrNew->cbReq = (uint32_t)cbNew; Assert(pHdrNew->cbReq == cbNew);
            memcpy((uint8_t *)(pHdrNew + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
            AssertReleaseMsg(!memcmp((uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                             ("pHdr=%p pvOld=%p cbReq=%u cb=%u cbNew=%zu fFlags=%#x\n"
                              "fence:    %.*Rhxs\n"
                              "expected: %.*Rhxs\n",
                              pHdrOld, pvOld, pHdrOld->cbReq, pHdrOld->cb, cbNew, pHdrOld->fFlags,
                              RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq,
                              RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
            rtR0MemFree(pHdrOld);
            return pHdrNew + 1;
        }
    }
    else
        AssertMsgFailed(("pHdrOld->u32Magic=%RX32 pvOld=%p cbNew=%#zx\n", pHdrOld->u32Magic, pvOld, cbNew));

    return NULL;
}
RT_EXPORT_SYMBOL(RTMemReallocTag);
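
/*
 * Illustrative usage sketch (assumption: the RTMemRealloc wrapper from
 * iprt/mem.h, which expands to RTMemReallocTag; pvBuf/cbBuf are hypothetical
 * caller variables).  Note that on failure the old block is left untouched
 * and still has to be freed by the caller.
 *
 *      void *pvNew = RTMemRealloc(pvBuf, cbBuf * 2);
 *      if (!pvNew)
 *          return VERR_NO_MEMORY;
 *      pvBuf  = pvNew;
 *      cbBuf *= 2;
 */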


RTDECL(void) RTMemFree(void *pv) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();

    if (!pv)
        return;
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_EXEC));
#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemFree);






RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
#ifdef RT_OS_SOLARIS /** @todo figure out why */
    RT_ASSERT_INTS_ON();
#else
    RT_ASSERT_PREEMPTIBLE();
#endif
    RT_NOREF_PV(pszTag);


    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_EXEC);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
        return pHdr + 1;
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemExecAllocTag);


RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();
    RT_NOREF_PV(cb);

    if (!pv)
        return;
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemExecFree);
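
/*
 * Illustrative usage sketch (assumption: the RTMemExecAlloc wrapper from
 * iprt/mem.h, which expands to RTMemExecAllocTag; pabCode/cbCode are
 * hypothetical): a small executable scratch buffer.
 *
 *      void *pvExec = RTMemExecAlloc(cbCode);
 *      if (pvExec)
 *      {
 *          memcpy(pvExec, pabCode, cbCode);
 *          ...
 *          RTMemExecFree(pvExec, cbCode);
 *      }
 */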




RTDECL(int) RTMemAllocExTag(size_t cb, size_t cbAlignment, uint32_t fFlags, const char *pszTag, void **ppv) RT_NO_THROW_DEF
{
    uint32_t fHdrFlags = RTMEMHDR_FLAG_ALLOC_EX;
    PRTMEMHDR pHdr;
    int rc;
    RT_NOREF_PV(pszTag);

    RT_ASSERT_PREEMPT_CPUID_VAR();
    if (!(fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC))
        RT_ASSERT_INTS_ON();

    /*
     * Fake up some alignment support.
     */
    AssertMsgReturn(cbAlignment <= sizeof(void *), ("%zu (%#x)\n", cbAlignment, cbAlignment), VERR_UNSUPPORTED_ALIGNMENT);
    if (cb < cbAlignment)
        cb = cbAlignment;

    /*
     * Validate and convert flags.
     */
    AssertMsgReturn(!(fFlags & ~RTMEMALLOCEX_FLAGS_VALID_MASK_R0), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
    if (fFlags & RTMEMALLOCEX_FLAGS_ZEROED)
        fHdrFlags |= RTMEMHDR_FLAG_ZEROED;
    if (fFlags & RTMEMALLOCEX_FLAGS_EXEC)
        fHdrFlags |= RTMEMHDR_FLAG_EXEC;
    if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC)
        fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_ALLOC;
    if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_FREE)
        fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_FREE;

    /*
     * Do the allocation.
     */
    rc = rtR0MemAllocEx(cb + RTR0MEM_FENCE_EXTRA, fHdrFlags, &pHdr);
    if (RT_SUCCESS(rc))
    {
        void *pv;

        Assert(pHdr->cbReq == cb + RTR0MEM_FENCE_EXTRA);
        Assert((pHdr->fFlags & fFlags) == fFlags);

        /*
         * Calc user pointer, initialize the memory if requested, and if
         * memory strictness is enabled set up the fence.
         */
        pv = pHdr + 1;
        *ppv = pv;
        if (fFlags & RTMEMHDR_FLAG_ZEROED)
            memset(pv, 0, pHdr->cb);

#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb;
        memcpy((uint8_t *)pv + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
    }
    else if (rc == VERR_NO_MEMORY && (fFlags & RTMEMALLOCEX_FLAGS_EXEC))
        rc = VERR_NO_EXEC_MEMORY;

    RT_ASSERT_PREEMPT_CPUID();
    return rc;
}
RT_EXPORT_SYMBOL(RTMemAllocExTag);
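
/*
 * Illustrative usage sketch (assumption: the RTMemAllocEx wrapper from
 * iprt/mem.h, which expands to RTMemAllocExTag; cbState is hypothetical):
 * an allocation that may be set up and torn down with interrupts disabled.
 *
 *      void *pv;
 *      int rc = RTMemAllocEx(cbState, 0,
 *                            RTMEMALLOCEX_FLAGS_ZEROED
 *                            | RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC
 *                            | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE,
 *                            &pv);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ...
 *          RTMemFreeEx(pv, cbState);
 *      }
 */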


RTDECL(void) RTMemFreeEx(void *pv, size_t cb) RT_NO_THROW_DEF
{
    PRTMEMHDR pHdr;
    RT_NOREF_PV(cb);

    if (!pv)
        return;

    AssertPtr(pv);
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        RT_ASSERT_PREEMPT_CPUID_VAR();

        Assert(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX);
        if (!(pHdr->fFlags & RTMEMHDR_FLAG_ANY_CTX_FREE))
            RT_ASSERT_INTS_ON();
        AssertMsg(pHdr->cbReq == cb, ("cbReq=%u cb=%zu\n", pHdr->cbReq, cb));

#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
        RT_ASSERT_PREEMPT_CPUID();
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemFreeEx);