/* $Id: memobj-r0drv-linux.c $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Linux.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-linux-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/process.h>
#include <iprt/string.h>
#include "internal/memobj.h"

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/* early 2.6 kernels */
#ifndef PAGE_SHARED_EXEC
# define PAGE_SHARED_EXEC   PAGE_SHARED
#endif
#ifndef PAGE_READONLY_EXEC
# define PAGE_READONLY_EXEC PAGE_READONLY
#endif

/*
 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
 * track_pfn_vma_new() is apparently not defined for non-RAM pages.
 * It should be safe to use vm_insert_page() on older kernels as well.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
# define VBOX_USE_INSERT_PAGE
#endif
#if defined(CONFIG_X86_PAE) \
 && (   defined(HAVE_26_STYLE_REMAP_PAGE_RANGE) \
     || (   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) \
         && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 11)))
# define VBOX_USE_PAE_HACK
#endif

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Linux version of the memory object structure.
 */
typedef struct RTR0MEMOBJLNX
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Set if the allocation is contiguous.
     * This means it has to be given back as one chunk. */
    bool                fContiguous;
    /** Set if we've vmap'ed the memory into ring-0. */
    bool                fMappedToRing0;
    /** The number of pages in the apPages array. */
    size_t              cPages;
    /** Array of struct page pointers. (variable size) */
    struct page        *apPages[1];
} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;


static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);

/**
 * Helper that converts from a RTR0PROCESS handle to a linux task.
 *
 * @returns The corresponding Linux task.
 * @param   R0Process   IPRT ring-0 process handle.
 */
static struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
{
    /** @todo fix rtR0ProcessToLinuxTask!! */
    /** @todo many (all?) callers currently assume that we return 'current'! */
    return R0Process == RTR0ProcHandleSelf() ? current : NULL;
}

/**
 * Compute order. Some functions allocate 2^order pages.
 *
 * @returns Order.
 * @param   cPages      Number of pages.
 */
static int rtR0MemObjLinuxOrder(size_t cPages)
{
    int    iOrder;
    size_t cTmp;

    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
        ;
    if (cPages & ~((size_t)1 << iOrder))
        ++iOrder;

    return iOrder;
}
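
/*
 * Worked examples (not part of the original file) of the rounding performed
 * by rtR0MemObjLinuxOrder() above:
 *
 *     cPages = 1  ->  iOrder = 0   (2^0 = 1 page,  exact)
 *     cPages = 2  ->  iOrder = 1   (2^1 = 2 pages, exact)
 *     cPages = 3  ->  iOrder = 2   (2^2 = 4 pages, rounded up)
 *     cPages = 8  ->  iOrder = 3   (2^3 = 8 pages, exact)
 *
 * So alloc_pages(fFlags, rtR0MemObjLinuxOrder(cPages)) may hand back more
 * pages than strictly requested when cPages is not a power of two.
 */
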
/**
 * Converts from RTMEM_PROT_* to Linux PAGE_*.
 *
 * @returns Linux page protection constant.
 * @param   fProt       The IPRT protection mask.
 * @param   fKernel     Whether it applies to kernel or user space.
 */
static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
{
    switch (fProt)
    {
        default:
            AssertMsgFailed(("%#x %d\n", fProt, fKernel));
        case RTMEM_PROT_NONE:
            return PAGE_NONE;

        case RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;

        case RTMEM_PROT_WRITE:
        case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL : PAGE_SHARED;

        case RTMEM_PROT_EXEC:
        case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
            if (fKernel)
            {
                pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
                pgprot_val(fPg) &= ~_PAGE_RW;
                return fPg;
            }
            return PAGE_READONLY_EXEC;
#else
            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
#endif

        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
    }
}
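
/*
 * Illustrative sketch (not in the original file) of how the conversion above
 * is used: a read/write request for a kernel mapping yields PAGE_KERNEL,
 * while the same request for user space yields PAGE_SHARED:
 *
 *     pgprot_t fPgR0 = rtR0MemObjLinuxConvertProt(RTMEM_PROT_READ | RTMEM_PROT_WRITE, true  /@ kernel @/);
 *     pgprot_t fPgR3 = rtR0MemObjLinuxConvertProt(RTMEM_PROT_READ | RTMEM_PROT_WRITE, false /@ user @/);
 */
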
/**
 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
 * an empty user space mapping.
 *
 * We acquire the mmap_sem of the task!
 *
 * @returns Pointer to the mapping.
 *          (void *)-1 on failure.
 * @param   R3PtrFixed  (RTR3PTR)-1 if anywhere, otherwise a specific location.
 * @param   cb          The size of the mapping.
 * @param   uAlignment  The alignment of the mapping.
 * @param   pTask       The Linux task to create this mapping in.
 * @param   fProt       The RTMEM_PROT_* mask.
 */
static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
{
    unsigned long   ulAddr;
    unsigned        fLnxProt;

    Assert(pTask == current); /* do_mmap */

    /*
     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
     */
    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    if (fProt == RTMEM_PROT_NONE)
        fLnxProt = PROT_NONE;
    else
    {
        fLnxProt = 0;
        if (fProt & RTMEM_PROT_READ)
            fLnxProt |= PROT_READ;
        if (fProt & RTMEM_PROT_WRITE)
            fLnxProt |= PROT_WRITE;
        if (fProt & RTMEM_PROT_EXEC)
            fLnxProt |= PROT_EXEC;
    }

    if (R3PtrFixed != (RTR3PTR)-1)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
#else
        down_write(&pTask->mm->mmap_sem);
        ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
        up_write(&pTask->mm->mmap_sem);
#endif
    }
    else
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
#else
        down_write(&pTask->mm->mmap_sem);
        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
        up_write(&pTask->mm->mmap_sem);
#endif
        if (    !(ulAddr & ~PAGE_MASK)
            &&  (ulAddr & (uAlignment - 1)))
        {
            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
             * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
             * ourselves) and further by there being two mmap strategies (top / bottom). */
            /* For now, just ignore uAlignment requirements... */
        }
    }

    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
        return (void *)-1;
    return (void *)ulAddr;
}
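
/*
 * Illustrative usage sketch (not in the original file): reserving 2 MB of
 * inaccessible anonymous user address space in the current task, much like
 * rtR0MemObjNativeReserveUser() does further down:
 *
 *     struct task_struct *pTask = rtR0ProcessToLinuxTask(RTR0ProcHandleSelf());
 *     void *pv = rtR0MemObjLinuxDoMmap((RTR3PTR)-1, _2M, PAGE_SIZE, pTask, RTMEM_PROT_NONE);
 *     if (pv != (void *)-1)
 *     {
 *         ... use the reserved range ...
 *         rtR0MemObjLinuxDoMunmap(pv, _2M, pTask);
 *     }
 */
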
/**
 * Worker that destroys a user space mapping.
 * Undoes what rtR0MemObjLinuxDoMmap did.
 *
 * We acquire the mmap_sem of the task!
 *
 * @param   pv          The ring-3 mapping.
 * @param   cb          The size of the mapping.
 * @param   pTask       The Linux task to destroy this mapping in.
 */
static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    Assert(pTask == current); RT_NOREF_PV(pTask);
    vm_munmap((unsigned long)pv, cb);
#elif defined(USE_RHEL4_MUNMAP)
    down_write(&pTask->mm->mmap_sem);
    do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
    up_write(&pTask->mm->mmap_sem);
#else
    down_write(&pTask->mm->mmap_sem);
    do_munmap(pTask->mm, (unsigned long)pv, cb);
    up_write(&pTask->mm->mmap_sem);
#endif
}

/**
 * Internal worker that allocates physical pages and creates the memory object for them.
 *
 * @returns IPRT status code.
 * @param   ppMemLnx    Where to store the memory object pointer.
 * @param   enmType     The object type.
 * @param   cb          The number of bytes to allocate.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid if fContiguous == true, ignored otherwise.
 * @param   fFlagsLnx   The page allocation flags (GFPs).
 * @param   fContiguous Whether the allocation must be contiguous.
 * @param   rcNoMem     What to return when we're out of pages.
 */
static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
                                     size_t uAlignment, unsigned fFlagsLnx, bool fContiguous, int rcNoMem)
{
    size_t          iPage;
    size_t const    cPages = cb >> PAGE_SHIFT;
    struct page    *paPages;

    /*
     * Allocate a memory object structure that's large enough to contain
     * the page pointer array.
     */
    PRTR0MEMOBJLNX  pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;
    pMemLnx->cPages = cPages;

#ifdef __GFP_REPEAT
    /* Try hard to allocate the memory, but the allocation attempt might fail. */
    fFlagsLnx |= __GFP_REPEAT;
#endif
#ifdef __GFP_NOMEMALLOC
    /* Introduced with Linux 2.6.12: Don't use emergency reserves */
    fFlagsLnx |= __GFP_NOMEMALLOC;
#endif

    /*
     * Allocate the pages.
     * For small allocations we'll try contiguous first and then fall back on page by page.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (    fContiguous
        ||  cb <= PAGE_SIZE * 2)
    {
# ifdef VBOX_USE_INSERT_PAGE
        paPages = alloc_pages(fFlagsLnx | __GFP_COMP | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
# else
        paPages = alloc_pages(fFlagsLnx | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
# endif
        if (paPages)
        {
            fContiguous = true;
            for (iPage = 0; iPage < cPages; iPage++)
                pMemLnx->apPages[iPage] = &paPages[iPage];
        }
        else if (fContiguous)
        {
            rtR0MemObjDelete(&pMemLnx->Core);
            return rcNoMem;
        }
    }

    if (!fContiguous)
    {
        for (iPage = 0; iPage < cPages; iPage++)
        {
            pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx | __GFP_NOWARN);
            if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
            {
                /* Free the pages we've allocated so far and bail out. */
                while (iPage-- > 0)
                    __free_page(pMemLnx->apPages[iPage]);
                rtR0MemObjDelete(&pMemLnx->Core);
                return rcNoMem;
            }
        }
    }

#else /* < 2.4.22 */
    /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
    paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
    if (!paPages)
    {
        rtR0MemObjDelete(&pMemLnx->Core);
        return rcNoMem;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        pMemLnx->apPages[iPage] = &paPages[iPage];
        MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
        if (PageHighMem(pMemLnx->apPages[iPage]))
            BUG();
    }

    fContiguous = true;
#endif /* < 2.4.22 */
    pMemLnx->fContiguous = fContiguous;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
    /*
     * Reserve the pages.
     *
     * Linux >= 4.5 with CONFIG_DEBUG_VM panics when setting PG_reserved on compound
     * pages. According to Michal Hocko this shouldn't be necessary anyway because
     * pages which are not on the LRU list are never evictable.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(pMemLnx->apPages[iPage]);
#endif

    /*
     * Note that the physical address of memory allocated with alloc_pages(flags, order)
     * is always 2^(PAGE_SHIFT+order)-aligned.
     */
    if (   fContiguous
        && uAlignment > PAGE_SIZE)
    {
        /*
         * Check for alignment constraints. The physical address of memory allocated with
         * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned.
         */
        if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
        {
            /*
             * This should never happen!
             */
            printk("rtR0MemObjLinuxAllocPages(cb=0x%lx, uAlignment=0x%lx): alloc_pages(..., %d) returned physical memory at 0x%lx!\n",
                   (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
            rtR0MemObjLinuxFreePages(pMemLnx);
            return rcNoMem;
        }
    }

    *ppMemLnx = pMemLnx;
    return VINF_SUCCESS;
}
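
/*
 * Worked example (not part of the original file) of the alignment guarantee
 * noted above: alloc_pages(fFlags, 2) returns 2^(PAGE_SHIFT+2) = 16 KiB
 * (with 4 KiB pages) of physically contiguous memory whose physical address
 * is 16 KiB aligned. A contiguous request for cb = 16 KiB with
 * uAlignment = 16 KiB is therefore satisfied automatically, and the check
 * above only fires if the kernel ever breaks that invariant.
 */
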
/**
 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
 *
 * This method does NOT free the object.
 *
 * @param   pMemLnx     The object which physical pages should be freed.
 */
static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
{
    size_t iPage = pMemLnx->cPages;
    if (iPage > 0)
    {
        /*
         * Restore the page flags.
         */
        while (iPage-- > 0)
        {
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
            /*
             * See SetPageReserved() in rtR0MemObjLinuxAllocPages()
             */
            ClearPageReserved(pMemLnx->apPages[iPage]);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
#else
            MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
#endif
        }

        /*
         * Free the pages.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        if (!pMemLnx->fContiguous)
        {
            iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                __free_page(pMemLnx->apPages[iPage]);
        }
        else
#endif
            __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));

        pMemLnx->cPages = 0;
    }
}

/**
 * Maps the allocation into ring-0.
 *
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
 *
 * Contiguous mappings that aren't in 'high' memory will already be mapped into kernel
 * space, so we'll use that mapping if possible. If execute access is required, we'll
 * play safe and do our own mapping.
 *
 * @returns IPRT status code.
 * @param   pMemLnx     The linux memory object to map.
 * @param   fExecutable Whether execute access is required.
 */
static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
{
    int rc = VINF_SUCCESS;

    /*
     * Choose mapping strategy.
     */
    bool fMustMap = fExecutable
                 || !pMemLnx->fContiguous;
    if (!fMustMap)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (PageHighMem(pMemLnx->apPages[iPage]))
            {
                fMustMap = true;
                break;
            }
    }

    Assert(!pMemLnx->Core.pv);
    Assert(!pMemLnx->fMappedToRing0);

    if (fMustMap)
    {
        /*
         * Use vmap - 2.4.22 and later.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        pgprot_t fPg;
        pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
# ifdef _PAGE_NX
        if (!fExecutable)
            pgprot_val(fPg) |= _PAGE_NX;
# endif

# ifdef VM_MAP
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
# endif
        if (pMemLnx->Core.pv)
            pMemLnx->fMappedToRing0 = true;
        else
            rc = VERR_MAP_FAILED;
#else   /* < 2.4.22 */
        rc = VERR_NOT_SUPPORTED;
#endif
    }
    else
    {
        /*
         * Use the kernel RAM mapping.
         */
        pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
        Assert(pMemLnx->Core.pv);
    }

    return rc;
}

/**
 * Undoes what rtR0MemObjLinuxVMap() did.
 *
 * @param   pMemLnx     The linux memory object.
 */
static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (pMemLnx->fMappedToRing0)
    {
        Assert(pMemLnx->Core.pv);
        vunmap(pMemLnx->Core.pv);
        pMemLnx->fMappedToRing0 = false;
    }
#else /* < 2.4.22 */
    Assert(!pMemLnx->fMappedToRing0);
#endif
    pMemLnx->Core.pv = NULL;
}

DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;

    /*
     * Release any memory that we've allocated or locked.
     */
    switch (pMemLnx->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            rtR0MemObjLinuxVUnmap(pMemLnx);
            rtR0MemObjLinuxFreePages(pMemLnx);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                size_t              iPage;
                Assert(pTask);
                if (pTask && pTask->mm)
                    down_read(&pTask->mm->mmap_sem);

                iPage = pMemLnx->cPages;
                while (iPage-- > 0)
                {
                    if (!PageReserved(pMemLnx->apPages[iPage]))
                        SetPageDirty(pMemLnx->apPages[iPage]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
                    put_page(pMemLnx->apPages[iPage]);
#else
                    page_cache_release(pMemLnx->apPages[iPage]);
#endif
                }

                if (pTask && pTask->mm)
                    up_read(&pTask->mm->mmap_sem);
            }
            /* else: kernel memory - nothing to do here. */
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            Assert(pMemLnx->Core.pv);
            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                Assert(pTask);
                if (pTask && pTask->mm)
                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
            }
            else
            {
                vunmap(pMemLnx->Core.pv);

                Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
                __free_page(pMemLnx->apPages[0]);
                pMemLnx->apPages[0] = NULL;
                pMemLnx->cPages = 0;
            }
            pMemLnx->Core.pv = NULL;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                Assert(pTask);
                if (pTask && pTask->mm)
                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
            }
            else
                vunmap(pMemLnx->Core.pv);
            pMemLnx->Core.pv = NULL;
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }
    IPRT_LINUX_RESTORE_EFL_ONLY_AC();
    return VINF_SUCCESS;
}

DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER,
                                   false /* non-contiguous */, VERR_NO_MEMORY);
#else
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_USER,
                                   false /* non-contiguous */, VERR_NO_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}

DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    /* Try to avoid GFP_DMA. GFP_DMA32 was introduced with Linux 2.6.15. */
#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA32,
                                   false /* non-contiguous */, VERR_NO_LOW_MEMORY);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA,
                                       false /* non-contiguous */, VERR_NO_LOW_MEMORY);
#else
# ifdef CONFIG_X86_PAE
# endif
        /* ZONE_NORMAL: 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_USER,
                                       false /* non-contiguous */, VERR_NO_LOW_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}

DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA32,
                                   true /* contiguous */, VERR_NO_CONT_MEMORY);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA,
                                       true /* contiguous */, VERR_NO_CONT_MEMORY);
#else
        /* ZONE_NORMAL (32-bit hosts): 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_USER,
                                       true /* contiguous */, VERR_NO_CONT_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
            size_t iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
#endif
            pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}

/**
 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object pointer on success.
 * @param   enmType     The object type.
 * @param   cb          The size of the allocation.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid for fContiguous == true, ignored otherwise.
 * @param   PhysHighest See rtR0MemObjNativeAllocPhys.
 * @param   fGfp        The Linux GFP flags to use for the allocation.
 */
static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                        size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, uAlignment, fGfp,
                                   enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */,
                                   VERR_NO_PHYS_MEMORY);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
     */
    if (PhysHighest != NIL_RTHCPHYS)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (page_to_phys(pMemLnx->apPages[iPage]) > PhysHighest)
            {
                rtR0MemObjLinuxFreePages(pMemLnx);
                rtR0MemObjDelete(&pMemLnx->Core);
                return VERR_NO_MEMORY;
            }
    }

    /*
     * Complete the object.
     */
    if (enmType == RTR0MEMOBJTYPE_PHYS)
    {
        pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
        pMemLnx->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemLnx->Core;
    return rc;
}

/**
 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object pointer on success.
 * @param   enmType     The object type.
 * @param   cb          The size of the allocation.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid for enmType == RTR0MEMOBJTYPE_PHYS, ignored otherwise.
 * @param   PhysHighest See rtR0MemObjNativeAllocPhys.
 */
static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                       size_t cb, size_t uAlignment, RTHCPHYS PhysHighest)
{
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * There are two clear cases and that's the <=16MB and anything-goes ones.
     * When the physical address limit is somewhere in-between those two we'll
     * just have to try, starting with HIGHUSER and working our way thru the
     * different types, hoping we'll get lucky.
     *
     * We should probably move this physical address restriction logic up to
     * the page alloc function as it would be more efficient there. But since
     * we don't expect this to be a performance issue just yet it can wait.
     */
    if (PhysHighest == NIL_RTHCPHYS)
        /* ZONE_HIGHMEM: the whole physical memory */
        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
    else if (PhysHighest <= _1M * 16)
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
    else
    {
        rc = VERR_NO_MEMORY;
        if (RT_FAILURE(rc))
            /* ZONE_HIGHMEM: the whole physical memory */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
        if (RT_FAILURE(rc))
            /* ZONE_NORMAL: 0-896MB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_USER);
#ifdef GFP_DMA32
        if (RT_FAILURE(rc))
            /* ZONE_DMA32: 0-4GB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA32);
#endif
        if (RT_FAILURE(rc))
            /* ZONE_DMA: 0-16MB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
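
/*
 * Worked example (not part of the original file) of the fallback order above
 * for an in-between limit, say PhysHighest = 3 GB: neither the <=16MB case
 * nor the anything-goes case applies, so rtR0MemObjLinuxAllocPhysSub2 is
 * tried with GFP_HIGHUSER, then GFP_USER, then GFP_DMA32 (if defined), then
 * GFP_DMA, and the first allocation whose pages all fall at or below
 * PhysHighest wins.
 */
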
/**
 * Translates a kernel virtual address to a linux page structure by walking the
 * page tables.
 *
 * @note    We do assume that the page tables will not change as we are walking
 *          them. This assumption is rather forced by the fact that I could not
 *          immediately see any way of preventing this from happening. So, we
 *          take some extra care when accessing them.
 *
 *          Because of this, we don't want to use this function on memory where
 *          attribute changes to nearby pages are likely to cause large pages to
 *          be used or split up. So, don't use this for the linear mapping of
 *          physical memory.
 *
 * @returns Pointer to the page structure or NULL if it could not be found.
 * @param   pv      The kernel virtual address.
 */
static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
{
    unsigned long   ulAddr = (unsigned long)pv;
    unsigned long   pfn;
    struct page    *pPage;
    pte_t          *pEntry;
    union
    {
        pgd_t       Global;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        p4d_t       Four;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
        pud_t       Upper;
#endif
        pmd_t       Middle;
        pte_t       Entry;
    } u;

    /* Should this happen in a situation this code will be called in? And if
     * so, can it change under our feet? See also
     * "Documentation/vm/active_mm.txt" in the kernel sources. */
    if (RT_UNLIKELY(!current->active_mm))
        return NULL;
    u.Global = *pgd_offset(current->active_mm, ulAddr);
    if (RT_UNLIKELY(pgd_none(u.Global)))
        return NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
    u.Four = *p4d_offset(&u.Global, ulAddr);
    if (RT_UNLIKELY(p4d_none(u.Four)))
        return NULL;
    if (p4d_large(u.Four))
    {
        pPage = p4d_page(u.Four);
        AssertReturn(pPage, NULL);
        pfn   = page_to_pfn(pPage);     /* doing the safe way... */
        AssertCompile(P4D_SHIFT - PAGE_SHIFT < 31);
        pfn  += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (P4D_SHIFT - PAGE_SHIFT)) - 1);
        return pfn_to_page(pfn);
    }
    u.Upper = *pud_offset(&u.Four, ulAddr);
# else /* < 4.12 */
    u.Upper = *pud_offset(&u.Global, ulAddr);
# endif /* < 4.12 */
    if (RT_UNLIKELY(pud_none(u.Upper)))
        return NULL;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
    if (pud_large(u.Upper))
    {
        pPage = pud_page(u.Upper);
        AssertReturn(pPage, NULL);
        pfn  = page_to_pfn(pPage);      /* doing the safe way... */
        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PUD_SHIFT - PAGE_SHIFT)) - 1);
        return pfn_to_page(pfn);
    }
# endif
    u.Middle = *pmd_offset(&u.Upper, ulAddr);
#else  /* < 2.6.11 */
    u.Middle = *pmd_offset(&u.Global, ulAddr);
#endif /* < 2.6.11 */
    if (RT_UNLIKELY(pmd_none(u.Middle)))
        return NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    if (pmd_large(u.Middle))
    {
        pPage = pmd_page(u.Middle);
        AssertReturn(pPage, NULL);
        pfn  = page_to_pfn(pPage);      /* doing the safe way... */
        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PMD_SHIFT - PAGE_SHIFT)) - 1);
        return pfn_to_page(pfn);
    }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map) /* As usual, RHEL 3 had pte_offset_map earlier. */
    pEntry = pte_offset_map(&u.Middle, ulAddr);
#else
    pEntry = pte_offset(&u.Middle, ulAddr);
#endif
    if (RT_UNLIKELY(!pEntry))
        return NULL;
    u.Entry = *pEntry;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map)
    pte_unmap(pEntry);
#endif

    if (RT_UNLIKELY(!pte_present(u.Entry)))
        return NULL;
    return pte_page(u.Entry);
}
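
/*
 * Illustrative usage sketch (not in the original file): resolving the
 * physical address behind a vmalloc'ed page, which is essentially what
 * rtR0MemObjNativeLockKernel() below does for non-linear addresses:
 *
 *     void        *pv    = vmalloc(PAGE_SIZE);            // hypothetical buffer
 *     struct page *pPage = rtR0MemObjLinuxVirtToPage(pv);
 *     if (pPage)
 *         printk("phys=%llx\n", (unsigned long long)page_to_phys(pPage));
 *     vfree(pv);
 */
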
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, uAlignment, PhysHighest);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PAGE_SIZE, PhysHighest);
}

DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * All we need to do here is to validate that we can use
     * ioremap on the specified address (32/64-bit dma_addr_t).
     */
    PRTR0MEMOBJLNX  pMemLnx;
    dma_addr_t      PhysAddr = Phys;
    AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
    pMemLnx->Core.u.Phys.fAllocated = false;
    pMemLnx->Core.u.Phys.uCachePolicy = uCachePolicy;
    Assert(!pMemLnx->cPages);
    *ppMem = &pMemLnx->Core;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}

/* openSUSE Leap 42.3 detection :-/ */
#if    LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) \
    && LINUX_VERSION_CODE <  KERNEL_VERSION(4, 6, 0) \
    && defined(FAULT_FLAG_REMOTE)
# define GET_USER_PAGES_API     KERNEL_VERSION(4, 10, 0) /* no typo! */
#else
# define GET_USER_PAGES_API     LINUX_VERSION_CODE
#endif

DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    IPRT_LINUX_SAVE_EFL_AC();
    const int cPages = cb >> PAGE_SHIFT;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    struct vm_area_struct **papVMAs;
    PRTR0MEMOBJLNX pMemLnx;
    int             rc      = VERR_NO_MEMORY;
    int  const      fWrite  = fAccess & RTMEM_PROT_WRITE ? 1 : 0;

    /*
     * Check for valid task and size overflows.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (((size_t)cPages << PAGE_SHIFT) != cb)
        return VERR_OUT_OF_RANGE;

    /*
     * Allocate the memory object and a temporary buffer for the VMAs.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
    if (papVMAs)
    {
        down_read(&pTask->mm->mmap_sem);

        /*
         * Get user pages.
         */
#if GET_USER_PAGES_API >= KERNEL_VERSION(4, 6, 0)
        if (R0Process == RTR0ProcHandleSelf())
            rc = get_user_pages(R3Ptr,                  /* Where from. */
                                cPages,                 /* How many pages. */
# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
                                fWrite ? FOLL_WRITE |   /* Write to memory. */
                                         FOLL_FORCE     /* force write access. */
                                       : 0,             /* Write to memory. */
# else
                                fWrite,                 /* Write to memory. */
                                fWrite,                 /* force write access. */
# endif
                                &pMemLnx->apPages[0],   /* Page array. */
                                papVMAs);               /* vmas */
        /*
         * Actually this should not happen at the moment as we call this function
         * only for our own process.
         */
        else
            rc = get_user_pages_remote(
                                pTask,                  /* Task for fault accounting. */
                                pTask->mm,              /* Whose pages. */
                                R3Ptr,                  /* Where from. */
                                cPages,                 /* How many pages. */
# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
                                fWrite ? FOLL_WRITE |   /* Write to memory. */
                                         FOLL_FORCE     /* force write access. */
                                       : 0,             /* Write to memory. */
# else
                                fWrite,                 /* Write to memory. */
                                fWrite,                 /* force write access. */
# endif
                                &pMemLnx->apPages[0],   /* Page array. */
                                papVMAs                 /* vmas */
# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 10, 0)
                                , NULL                  /* locked */
# endif
                                );
#else /* GET_USER_PAGES_API < KERNEL_VERSION(4, 6, 0) */
            rc = get_user_pages(pTask,                  /* Task for fault accounting. */
                                pTask->mm,              /* Whose pages. */
                                R3Ptr,                  /* Where from. */
                                cPages,                 /* How many pages. */
# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
                                fWrite ? FOLL_WRITE |   /* Write to memory. */
                                         FOLL_FORCE     /* force write access. */
                                       : 0,             /* Write to memory. */
# else
                                fWrite,                 /* Write to memory. */
                                fWrite,                 /* force write access. */
# endif
                                &pMemLnx->apPages[0],   /* Page array. */
                                papVMAs);               /* vmas */
#endif /* GET_USER_PAGES_API < KERNEL_VERSION(4, 6, 0) */
        if (rc == cPages)
        {
            /*
             * Flush dcache (required?), protect against fork and _really_ pin the page
             * table entries. get_user_pages() will protect against swapping out the
             * pages but it will NOT protect against removing page table entries. This
             * can be achieved with
             *   - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
             *     an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
             *     Usual Linux distributions support only a limited size of locked pages
             *     (e.g. 32KB).
             *   - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages()
             *     above) or by
             *   - setting the VM_LOCKED flag. This is the same as doing mlock() without
             *     a need for the RLIMIT_MEMLOCK limit.
             */
            /** @todo The Linux fork() protection will require more work if this API
             * is to be used for anything but locking VM pages. */
            while (rc-- > 0)
            {
                flush_dcache_page(pMemLnx->apPages[rc]);
                papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
            }

            up_read(&pTask->mm->mmap_sem);

            RTMemFree(papVMAs);

            pMemLnx->Core.u.Lock.R0Process = R0Process;
            pMemLnx->cPages = cPages;
            Assert(!pMemLnx->fMappedToRing0);
            *ppMem = &pMemLnx->Core;

            IPRT_LINUX_RESTORE_EFL_AC();
            return VINF_SUCCESS;
        }

        /*
         * Failed - we need to unlock any pages that we succeeded to lock.
         */
        while (rc-- > 0)
        {
            if (!PageReserved(pMemLnx->apPages[rc]))
                SetPageDirty(pMemLnx->apPages[rc]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
            put_page(pMemLnx->apPages[rc]);
#else
            page_cache_release(pMemLnx->apPages[rc]);
#endif
        }

        up_read(&pTask->mm->mmap_sem);

        RTMemFree(papVMAs);
        rc = VERR_LOCK_FAILED;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
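
/*
 * Illustrative usage sketch (not in the original file), assuming the public
 * RTR0MemObjLockUser() API that fronts this worker:
 *
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjLockUser(&hMemObj, R3Ptr, cb,
 *                                 RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                 RTR0ProcHandleSelf());
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... e.g. RTR0MemObjGetPagePhysAddr(hMemObj, 0) ...
 *         RTR0MemObjFree(hMemObj, false /@ fFreeMappings @/);
 *     }
 */
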
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    IPRT_LINUX_SAVE_EFL_AC();
    void           *pvLast = (uint8_t *)pv + cb - 1;
    size_t const    cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJLNX  pMemLnx;
    bool            fLinearMapping;
    int             rc;
    uint8_t        *pbPage;
    size_t          iPage;
    NOREF(fAccess);

    if (   !RTR0MemKernelIsValidAddr(pv)
        || !RTR0MemKernelIsValidAddr(pv + cb))
        return VERR_INVALID_PARAMETER;

    /*
     * The lower part of the kernel memory has a linear mapping between
     * physical and virtual addresses. So we take a short cut here. This is
     * assumed to be the cleanest way to handle those addresses (and the code
     * is well tested, though the test for determining it is not very nice).
     * If we ever decide it isn't we can still remove it.
     */
#if 0
    fLinearMapping = (unsigned long)pvLast < VMALLOC_START;
#else
    fLinearMapping = (unsigned long)pv     >= (unsigned long)__va(0)
                  && (unsigned long)pvLast <  (unsigned long)high_memory;
#endif

    /*
     * Allocate the memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Gather the pages.
     * We ASSUME all kernel pages are non-swappable and non-movable.
     */
    rc     = VINF_SUCCESS;
    pbPage = (uint8_t *)pvLast;
    iPage  = cPages;
    if (!fLinearMapping)
    {
        while (iPage-- > 0)
        {
            struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
            if (RT_UNLIKELY(!pPage))
            {
                rc = VERR_LOCK_FAILED;
                break;
            }
            pMemLnx->apPages[iPage] = pPage;
            pbPage -= PAGE_SIZE;
        }
    }
    else
    {
        while (iPage-- > 0)
        {
            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
            pbPage -= PAGE_SIZE;
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Complete the memory object and return.
         */
        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        pMemLnx->cPages = cPages;
        Assert(!pMemLnx->fMappedToRing0);
        *ppMem = &pMemLnx->Core;

        IPRT_LINUX_RESTORE_EFL_AC();
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}

DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    IPRT_LINUX_SAVE_EFL_AC();
    const size_t cPages = cb >> PAGE_SHIFT;
    struct page *pDummyPage;
    struct page **papPages;

    /* check for unsupported stuff. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate a dummy page and create a page pointer array for vmap such that
     * the dummy page is mapped all over the reserved area.
     */
    pDummyPage = alloc_page(GFP_HIGHUSER | __GFP_NOWARN);
    if (pDummyPage)
    {
        papPages = RTMemAlloc(sizeof(*papPages) * cPages);
        if (papPages)
        {
            void *pv;
            size_t iPage = cPages;
            while (iPage-- > 0)
                papPages[iPage] = pDummyPage;
# ifdef VM_MAP
            pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
# else
            pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
# endif
            RTMemFree(papPages);
            if (pv)
            {
                PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
                if (pMemLnx)
                {
                    pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
                    pMemLnx->cPages = 1;
                    pMemLnx->apPages[0] = pDummyPage;
                    *ppMem = &pMemLnx->Core;
                    IPRT_LINUX_RESTORE_EFL_AC();
                    return VINF_SUCCESS;
                }
                vunmap(pv);
            }
        }
        __free_page(pDummyPage);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return VERR_NO_MEMORY;

#else   /* < 2.4.22 */
    /*
     * Could probably use ioremap here, but the caller is in a better position than us
     * to select some safe physical memory.
     */
    return VERR_NOT_SUPPORTED;
#endif
}

DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX  pMemLnx;
    void           *pv;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    if (!pTask)
        return VERR_NOT_SUPPORTED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Let rtR0MemObjLinuxDoMmap do the difficult bits.
     */
    pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
    if (pv == (void *)-1)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemLnx)
    {
        rtR0MemObjLinuxDoMunmap(pv, cb, pTask);
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx->Core.u.ResVirt.R0Process = R0Process;
    *ppMem = &pMemLnx->Core;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}

DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap,
                                          void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    int rc = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    PRTR0MEMOBJLNX pMemLnx;
    IPRT_LINUX_SAVE_EFL_AC();

    /* Fail if requested to do something we can't. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        if (pMemLnxToMap->cPages)
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
            /*
             * Use vmap - 2.4.22 and later.
             */
            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
# ifdef VM_MAP
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
# else
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
# endif
            if (pMemLnx->Core.pv)
            {
                pMemLnx->fMappedToRing0 = true;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_MAP_FAILED;

#else   /* < 2.4.22 */
            /*
             * Only option here is to share mappings if possible and forget about fProt.
             */
            if (rtR0MemObjIsRing3(pMemToMap))
                rc = VERR_NOT_SUPPORTED;
            else
            {
                rc = VINF_SUCCESS;
                if (!pMemLnxToMap->Core.pv)
                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
                if (RT_SUCCESS(rc))
                {
                    Assert(pMemLnxToMap->Core.pv);
                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
                }
            }
#endif
        }
        else
        {
            /*
             * MMIO / physical memory.
             */
            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
            if (pMemLnx->Core.pv)
            {
                /** @todo fix protection. */
                rc = VINF_SUCCESS;
            }
        }
        if (RT_SUCCESS(rc))
        {
            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
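
/*
 * Illustrative usage sketch (not in the original file): mapping a device's
 * MMIO range into ring-0 via the public IPRT APIs that end up in the workers
 * above, assuming hypothetical PhysMmioBase/cbMmio values:
 *
 *     RTR0MEMOBJ hPhys, hMap;
 *     int rc = RTR0MemObjEnterPhys(&hPhys, PhysMmioBase, cbMmio, RTMEM_CACHE_POLICY_MMIO);
 *     if (RT_SUCCESS(rc))
 *         rc = RTR0MemObjMapKernel(&hMap, hPhys, (void *)-1, PAGE_SIZE,
 *                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 *
 * The RTMEM_CACHE_POLICY_MMIO policy is what makes rtR0MemObjNativeMapKernel
 * above pick ioremap_nocache() over ioremap().
 */
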
#ifdef VBOX_USE_PAE_HACK
/**
 * Replace the PFN of a PTE with the address of the actual page.
 *
 * The caller maps a reserved dummy page at the address with the desired access
 * and flags.
 *
 * This hack is required for older Linux kernels which don't provide
 * remap_pfn_range().
 *
 * @returns 0 on success, -ENOMEM on failure.
 * @param   mm          The memory context.
 * @param   ulAddr      The mapping address.
 * @param   Phys        The physical address of the page to map.
 */
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
    int rc = -ENOMEM;
    pgd_t *pgd;

    spin_lock(&mm->page_table_lock);

    pgd = pgd_offset(mm, ulAddr);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
    {
        pmd_t *pmd = pmd_offset(pgd, ulAddr);
        if (!pmd_none(*pmd))
        {
            pte_t *ptep = pte_offset_map(pmd, ulAddr);
            if (ptep)
            {
                pte_t pte = *ptep;
                pte.pte_high &= 0xfff00000;
                pte.pte_high |= ((Phys >> 32) & 0x000fffff);
                pte.pte_low  &= 0x00000fff;
                pte.pte_low  |= (Phys & 0xfffff000);
                set_pte(ptep, pte);
                pte_unmap(ptep);
                rc = 0;
            }
        }
    }

    spin_unlock(&mm->page_table_lock);
    return rc;
}
#endif /* VBOX_USE_PAE_HACK */
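
/*
 * Worked example (not part of the original file) for the PTE surgery above,
 * taking Phys = 0x0000000123456000 (PAE frame number 0x123456):
 *
 *     pte_high: keeps bits 20..31, ORs in (Phys >> 32) & 0x000fffff = 0x00001
 *     pte_low:  keeps bits 0..11 (access/attribute flags),
 *               ORs in Phys & 0xfffff000 = 0x23456000
 *
 * i.e. the dummy page's frame number is replaced with the real one while the
 * access and attribute bits in the low 12 bits of pte_low stay untouched.
 */
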
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    struct task_struct *pTask        = rtR0ProcessToLinuxTask(R0Process);
    PRTR0MEMOBJLNX      pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    int                 rc           = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX      pMemLnx;
#ifdef VBOX_USE_PAE_HACK
    struct page        *pDummyPage;
    RTHCPHYS            DummyPhys;
#endif
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Check for restrictions.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

#ifdef VBOX_USE_PAE_HACK
    /*
     * Allocate a dummy page for use when mapping the memory.
     */
    pDummyPage = alloc_page(GFP_USER | __GFP_NOWARN);
    if (!pDummyPage)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }
    SetPageReserved(pDummyPage);
    DummyPhys = page_to_phys(pDummyPage);
#endif

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        /*
         * Allocate user space mapping.
         */
        void *pv;
        pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
        if (pv != (void *)-1)
        {
            /*
             * Map page by page into the mmap area.
             * This is generic, paranoid and not very efficient.
             */
            pgprot_t        fPg       = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
            unsigned long   ulAddrCur = (unsigned long)pv;
            const size_t    cPages    = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
            size_t          iPage;

            down_write(&pTask->mm->mmap_sem);

            rc = VINF_SUCCESS;
            if (pMemLnxToMap->cPages)
            {
                for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
                {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
                    RTHCPHYS Phys = page_to_phys(pMemLnxToMap->apPages[iPage]);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                    struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
                    AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
                    /* remap_page_range() limitation on x86 */
                    AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
#endif

#if   defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
                    rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
                    /* These flags help to make 100% sure some bad stuff won't happen (swap, core, ++).
                     * See remap_pfn_range() in mm/memory.c */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
                    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
# else
                    vma->vm_flags |= VM_RESERVED;
# endif
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
                    rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
#elif defined(VBOX_USE_PAE_HACK)
                    rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
                    if (!rc)
                        rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                    rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
#else /* 2.4 */
                    rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
#endif
                    if (rc)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                }
            }
            else
            {
                RTHCPHYS Phys;
                if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
                    Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
                else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
                    Phys = pMemLnxToMap->Core.u.Cont.Phys;
                else
                {
                    AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
                    Phys = NIL_RTHCPHYS;
                }
                if (Phys != NIL_RTHCPHYS)
                {
                    for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
                    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                        struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
                        AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
                        /* remap_page_range() limitation on x86 */
                        AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
#endif

#if   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
                        rc = remap_pfn_range(vma, ulAddrCur, Phys >> PAGE_SHIFT, PAGE_SIZE, fPg);
#elif defined(VBOX_USE_PAE_HACK)
                        rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
                        if (!rc)
                            rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                        rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
#else /* 2.4 */
                        rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
#endif
                        if (rc)
                        {
                            rc = VERR_NO_MEMORY;
                            break;
                        }
                    }
                }
            }

#ifdef CONFIG_NUMA_BALANCING
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
#  ifdef RHEL_RELEASE_CODE
#   if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)
#    define VBOX_NUMA_HACK_OLD
#   endif
#  endif
# endif
            if (RT_SUCCESS(rc))
            {
                /** @todo Ugly hack! But right now we have no other means to
                 *        disable automatic NUMA page balancing. */
# ifdef RT_OS_X86
#  ifdef VBOX_NUMA_HACK_OLD
                pTask->mm->numa_next_reset = jiffies + 0x7fffffffUL;
#  endif
                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffUL;
# else
#  ifdef VBOX_NUMA_HACK_OLD
                pTask->mm->numa_next_reset = jiffies + 0x7fffffffffffffffUL;
#  endif
                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffffffffffUL;
# endif
            }
#endif /* CONFIG_NUMA_BALANCING */

            up_write(&pTask->mm->mmap_sem);

            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_USE_PAE_HACK
                __free_page(pDummyPage);
#endif
                pMemLnx->Core.pv = pv;
                pMemLnx->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemLnx->Core;
                IPRT_LINUX_RESTORE_EFL_AC();
                return VINF_SUCCESS;
            }

            /*
             * Bail out.
             */
            rtR0MemObjLinuxDoMunmap(pv, pMemLnxToMap->Core.cb, pTask);
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }
#ifdef VBOX_USE_PAE_HACK
    __free_page(pDummyPage);
#endif
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}

DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}

DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;

    if (pMemLnx->cPages)
        return page_to_phys(pMemLnx->apPages[iPage]);

    switch (pMemLnx->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            return pMemLnx->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemLnx->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

            /* the parent knows */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemLnx->Core.uRel.Child.pParent, iPage);

            /* cPages > 0 for all of these, so they are handled above. */
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_PAGE:
        default:
            AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
            /* fall thru */

        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}
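
/*
 * Worked example (not part of the original file) for the CONT case above:
 * with Core.u.Cont.Phys = 0x10000000 and iPage = 3, the result is
 * 0x10000000 + (3 << PAGE_SHIFT) = 0x10003000 for 4 KiB pages, i.e. the
 * physical address of the fourth page of the contiguous allocation.
 */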