/* $Id: memobj-r0drv-linux.c $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Linux.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-linux-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/process.h>
#include <iprt/string.h>
#include "internal/memobj.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/* early 2.6 kernels */
#ifndef PAGE_SHARED_EXEC
# define PAGE_SHARED_EXEC   PAGE_SHARED
#endif
#ifndef PAGE_READONLY_EXEC
# define PAGE_READONLY_EXEC PAGE_READONLY
#endif

/*
 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
 * track_pfn_vma_new() is apparently not defined for non-RAM pages.
 * It should be safe to use vm_insert_page() on older kernels as well.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
# define VBOX_USE_INSERT_PAGE
#endif
#if defined(CONFIG_X86_PAE) \
 && (   defined(HAVE_26_STYLE_REMAP_PAGE_RANGE) \
     || (   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) \
         && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 11)))
# define VBOX_USE_PAE_HACK
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Linux version of the memory object structure.
 */
typedef struct RTR0MEMOBJLNX
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Set if the allocation is contiguous.
     * This means it has to be given back as one chunk. */
    bool                fContiguous;
    /** Set if we've vmap'ed the memory into ring-0. */
    bool                fMappedToRing0;
    /** The number of pages in the apPages array. */
    size_t              cPages;
    /** Array of struct page pointers. (variable size) */
    struct page        *apPages[1];
} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
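
/* Note: instances are created with rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), ...),
 * see rtR0MemObjLinuxAllocPages() below, so the single-element apPages declaration effectively
 * serves as a variable sized trailing array. */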


static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);


/**
 * Helper that converts from a RTR0PROCESS handle to a linux task.
 *
 * @returns The corresponding Linux task.
 * @param   R0Process   IPRT ring-0 process handle.
 */
static struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
{
    /** @todo fix rtR0ProcessToLinuxTask!! */
    /** @todo many (all?) callers currently assume that we return 'current'! */
    return R0Process == RTR0ProcHandleSelf() ? current : NULL;
}


/**
 * Compute order. Some functions allocate 2^order pages.
 *
 * @returns order.
 * @param   cPages      Number of pages.
 */
static int rtR0MemObjLinuxOrder(size_t cPages)
{
    int    iOrder;
    size_t cTmp;

    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
        ;
    if (cPages & ~((size_t)1 << iOrder))
        ++iOrder;

    return iOrder;
}
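
/* Example: rtR0MemObjLinuxOrder(1) == 0, (2) == 1, (3) == 2, (4) == 2 and (5) == 3,
 * i.e. the smallest order such that 2^order >= cPages. */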


/**
 * Converts from RTMEM_PROT_* to Linux PAGE_*.
 *
 * @returns Linux page protection constant.
 * @param   fProt       The IPRT protection mask.
 * @param   fKernel     Whether it applies to kernel or user space.
 */
static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
{
    switch (fProt)
    {
        default:
            AssertMsgFailed(("%#x %d\n", fProt, fKernel));
        case RTMEM_PROT_NONE:
            return PAGE_NONE;

        case RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;

        case RTMEM_PROT_WRITE:
        case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
            return fKernel ? PAGE_KERNEL : PAGE_SHARED;

        case RTMEM_PROT_EXEC:
        case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
            if (fKernel)
            {
                pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
                pgprot_val(fPg) &= ~_PAGE_RW;
                return fPg;
            }
            return PAGE_READONLY_EXEC;
#else
            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
#endif

        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
    }
}
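
/* Example: RTMEM_PROT_READ | RTMEM_PROT_WRITE yields PAGE_KERNEL for ring-0 mappings
 * and PAGE_SHARED for user mappings. */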


/**
 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
 * an empty user space mapping.
 *
 * We acquire the mmap_sem of the task!
 *
 * @returns Pointer to the mapping.
 *          (void *)-1 on failure.
 * @param   R3PtrFixed  (RTR3PTR)-1 if anywhere, otherwise a specific location.
 * @param   cb          The size of the mapping.
 * @param   uAlignment  The alignment of the mapping.
 * @param   pTask       The Linux task to create this mapping in.
 * @param   fProt       The RTMEM_PROT_* mask.
 */
static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
{
    unsigned fLnxProt;
    unsigned long ulAddr;

    Assert(pTask == current); /* do_mmap */
    RT_NOREF_PV(pTask);

    /*
     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
     */
    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    if (fProt == RTMEM_PROT_NONE)
        fLnxProt = PROT_NONE;
    else
    {
        fLnxProt = 0;
        if (fProt & RTMEM_PROT_READ)
            fLnxProt |= PROT_READ;
        if (fProt & RTMEM_PROT_WRITE)
            fLnxProt |= PROT_WRITE;
        if (fProt & RTMEM_PROT_EXEC)
            fLnxProt |= PROT_EXEC;
    }

    if (R3PtrFixed != (RTR3PTR)-1)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
#else
        down_write(&pTask->mm->mmap_sem);
        ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
        up_write(&pTask->mm->mmap_sem);
#endif
    }
    else
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
#else
        down_write(&pTask->mm->mmap_sem);
        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
        up_write(&pTask->mm->mmap_sem);
#endif
        if (   !(ulAddr & ~PAGE_MASK)
            && (ulAddr & (uAlignment - 1)))
        {
            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
             * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
             * ourselves) and further by there being two mmap strategies (top / bottom). */
            /* For now, just ignore uAlignment requirements... */
        }
    }


    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
        return (void *)-1;
    return (void *)ulAddr;
}
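
/* Callers must treat (void *)-1 as the failure indicator; see e.g. the checks in
 * rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser below. */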


/**
 * Worker that destroys a user space mapping.
 * Undoes what rtR0MemObjLinuxDoMmap did.
 *
 * We acquire the mmap_sem of the task!
 *
 * @param   pv          The ring-3 mapping.
 * @param   cb          The size of the mapping.
 * @param   pTask       The Linux task to destroy this mapping in.
 */
static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    Assert(pTask == current); RT_NOREF_PV(pTask);
    vm_munmap((unsigned long)pv, cb);
#elif defined(USE_RHEL4_MUNMAP)
    down_write(&pTask->mm->mmap_sem);
    do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
    up_write(&pTask->mm->mmap_sem);
#else
    down_write(&pTask->mm->mmap_sem);
    do_munmap(pTask->mm, (unsigned long)pv, cb);
    up_write(&pTask->mm->mmap_sem);
#endif
}


/**
 * Internal worker that allocates physical pages and creates the memory object for them.
 *
 * @returns IPRT status code.
 * @param   ppMemLnx    Where to store the memory object pointer.
 * @param   enmType     The object type.
 * @param   cb          The number of bytes to allocate.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid if fContiguous == true, ignored otherwise.
 * @param   fFlagsLnx   The page allocation flags (GFPs).
 * @param   fContiguous Whether the allocation must be contiguous.
 * @param   rcNoMem     What to return when we're out of pages.
 */
static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
                                     size_t uAlignment, unsigned fFlagsLnx, bool fContiguous, int rcNoMem)
{
    size_t          iPage;
    size_t const    cPages = cb >> PAGE_SHIFT;
    struct page    *paPages;

    /*
     * Allocate a memory object structure that's large enough to contain
     * the page pointer array.
     */
    PRTR0MEMOBJLNX  pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;
    pMemLnx->cPages = cPages;

    if (cPages > 255)
    {
# ifdef __GFP_REPEAT
        /* Try hard to allocate the memory, but the allocation attempt might fail. */
        fFlagsLnx |= __GFP_REPEAT;
# endif
# ifdef __GFP_NOMEMALLOC
        /* Introduced with Linux 2.6.12: Don't use emergency reserves */
        fFlagsLnx |= __GFP_NOMEMALLOC;
# endif
    }

    /*
     * Allocate the pages.
     * For small allocations we'll try contiguous first and then fall back on page by page.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (   fContiguous
        || cb <= PAGE_SIZE * 2)
    {
# ifdef VBOX_USE_INSERT_PAGE
        paPages = alloc_pages(fFlagsLnx | __GFP_COMP | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
# else
        paPages = alloc_pages(fFlagsLnx | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
# endif
        if (paPages)
        {
            fContiguous = true;
            for (iPage = 0; iPage < cPages; iPage++)
                pMemLnx->apPages[iPage] = &paPages[iPage];
        }
        else if (fContiguous)
        {
            rtR0MemObjDelete(&pMemLnx->Core);
            return rcNoMem;
        }
    }

    if (!fContiguous)
    {
        for (iPage = 0; iPage < cPages; iPage++)
        {
            pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx | __GFP_NOWARN);
            if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
            {
                while (iPage-- > 0)
                    __free_page(pMemLnx->apPages[iPage]);
                rtR0MemObjDelete(&pMemLnx->Core);
                return rcNoMem;
            }
        }
    }

#else /* < 2.4.22 */
    /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
    paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
    if (!paPages)
    {
        rtR0MemObjDelete(&pMemLnx->Core);
        return rcNoMem;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        pMemLnx->apPages[iPage] = &paPages[iPage];
        MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
        if (PageHighMem(pMemLnx->apPages[iPage]))
            BUG();
    }

    fContiguous = true;
#endif /* < 2.4.22 */
    pMemLnx->fContiguous = fContiguous;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
    /*
     * Reserve the pages.
     *
     * Linux >= 4.5 with CONFIG_DEBUG_VM panics when setting PG_reserved on compound
     * pages. According to Michal Hocko this shouldn't be necessary anyway because
     * pages which are not on the LRU list are never evictable.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(pMemLnx->apPages[iPage]);
#endif

    /*
     * Note that the physical address of memory allocated with alloc_pages(flags, order)
     * is always 2^(PAGE_SHIFT+order)-aligned.
     */
    if (   fContiguous
        && uAlignment > PAGE_SIZE)
    {
        /*
         * Check the alignment constraint.
         */
        if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
        {
            /*
             * This should never happen!
             */
            printk("rtR0MemObjLinuxAllocPages(cb=0x%lx, uAlignment=0x%lx): alloc_pages(..., %d) returned physical memory at 0x%lx!\n",
                   (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
            rtR0MemObjLinuxFreePages(pMemLnx);
            return rcNoMem;
        }
    }

    *ppMemLnx = pMemLnx;
    return VINF_SUCCESS;
}
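
/* Example: rtR0MemObjNativeAllocPage below requests non-contiguous GFP_HIGHUSER pages,
 * while rtR0MemObjNativeAllocCont requests contiguous pages from an addressing
 * constrained zone (GFP_DMA32, GFP_DMA or GFP_USER). */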


/**
 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
 *
 * This method does NOT free the object.
 *
 * @param   pMemLnx     The object which physical pages should be freed.
 */
static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
{
    size_t iPage = pMemLnx->cPages;
    if (iPage > 0)
    {
        /*
         * Restore the page flags.
         */
        while (iPage-- > 0)
        {
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
            /*
             * See SetPageReserved() in rtR0MemObjLinuxAllocPages()
             */
            ClearPageReserved(pMemLnx->apPages[iPage]);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
            MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
#endif
        }

        /*
         * Free the pages.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        if (!pMemLnx->fContiguous)
        {
            iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                __free_page(pMemLnx->apPages[iPage]);
        }
        else
#endif
            __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));

        pMemLnx->cPages = 0;
    }
}


/**
 * Maps the allocation into ring-0.
 *
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
 *
 * Contiguous mappings that aren't in 'high' memory will already be mapped into kernel
 * space, so we'll use that mapping if possible. If execute access is required, we'll
 * play safe and do our own mapping.
 *
 * @returns IPRT status code.
 * @param   pMemLnx     The linux memory object to map.
 * @param   fExecutable Whether execute access is required.
 */
static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
{
    int rc = VINF_SUCCESS;

    /*
     * Choose mapping strategy.
     */
    bool fMustMap = fExecutable
                 || !pMemLnx->fContiguous;
    if (!fMustMap)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (PageHighMem(pMemLnx->apPages[iPage]))
            {
                fMustMap = true;
                break;
            }
    }

    Assert(!pMemLnx->Core.pv);
    Assert(!pMemLnx->fMappedToRing0);

    if (fMustMap)
    {
        /*
         * Use vmap - 2.4.22 and later.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        pgprot_t fPg;
        pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
# ifdef _PAGE_NX
        if (!fExecutable)
            pgprot_val(fPg) |= _PAGE_NX;
# endif

# ifdef VM_MAP
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
# endif
        if (pMemLnx->Core.pv)
            pMemLnx->fMappedToRing0 = true;
        else
            rc = VERR_MAP_FAILED;
#else  /* < 2.4.22 */
        rc = VERR_NOT_SUPPORTED;
#endif
    }
    else
    {
        /*
         * Use the kernel RAM mapping.
         */
        pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
        Assert(pMemLnx->Core.pv);
    }

    return rc;
}


/**
 * Undoes what rtR0MemObjLinuxVMap() did.
 *
 * @param   pMemLnx     The linux memory object.
 */
static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (pMemLnx->fMappedToRing0)
    {
        Assert(pMemLnx->Core.pv);
        vunmap(pMemLnx->Core.pv);
        pMemLnx->fMappedToRing0 = false;
    }
#else  /* < 2.4.22 */
    Assert(!pMemLnx->fMappedToRing0);
#endif
    pMemLnx->Core.pv = NULL;
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;

    /*
     * Release any memory that we've allocated or locked.
     */
    switch (pMemLnx->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            rtR0MemObjLinuxVUnmap(pMemLnx);
            rtR0MemObjLinuxFreePages(pMemLnx);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                size_t              iPage;
                Assert(pTask);
                if (pTask && pTask->mm)
                    down_read(&pTask->mm->mmap_sem);

                iPage = pMemLnx->cPages;
                while (iPage-- > 0)
                {
                    if (!PageReserved(pMemLnx->apPages[iPage]))
                        SetPageDirty(pMemLnx->apPages[iPage]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
                    put_page(pMemLnx->apPages[iPage]);
#else
                    page_cache_release(pMemLnx->apPages[iPage]);
#endif
                }

                if (pTask && pTask->mm)
                    up_read(&pTask->mm->mmap_sem);
            }
            /* else: kernel memory - nothing to do here. */
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            Assert(pMemLnx->Core.pv);
            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                Assert(pTask);
                if (pTask && pTask->mm)
                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
            }
            else
            {
                vunmap(pMemLnx->Core.pv);

                Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
                __free_page(pMemLnx->apPages[0]);
                pMemLnx->apPages[0] = NULL;
                pMemLnx->cPages = 0;
            }
            pMemLnx->Core.pv = NULL;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
            {
                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
                Assert(pTask);
                if (pTask && pTask->mm)
                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
            }
            else
                vunmap(pMemLnx->Core.pv);
            pMemLnx->Core.pv = NULL;
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }
    IPRT_LINUX_RESTORE_EFL_ONLY_AC();
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER,
                                   false /* non-contiguous */, VERR_NO_MEMORY);
#else
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_USER,
                                   false /* non-contiguous */, VERR_NO_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    /* Try to avoid GFP_DMA. GFP_DMA32 was introduced with Linux 2.6.15. */
#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA32,
                                   false /* non-contiguous */, VERR_NO_LOW_MEMORY);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA,
                                       false /* non-contiguous */, VERR_NO_LOW_MEMORY);
#else
# ifdef CONFIG_X86_PAE
# endif
        /* ZONE_NORMAL: 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_USER,
                                       false /* non-contiguous */, VERR_NO_LOW_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA32,
                                   true /* contiguous */, VERR_NO_CONT_MEMORY);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA,
                                       true /* contiguous */, VERR_NO_CONT_MEMORY);
#else
        /* ZONE_NORMAL (32-bit hosts): 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_USER,
                                       true /* contiguous */, VERR_NO_CONT_MEMORY);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
            size_t iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
#endif
            pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


/**
 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object pointer on success.
 * @param   enmType     The object type.
 * @param   cb          The size of the allocation.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid for fContiguous == true, ignored otherwise.
 * @param   PhysHighest See rtR0MemObjNativeAllocPhys.
 * @param   fGfp        The Linux GFP flags to use for the allocation.
 */
static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                        size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, uAlignment, fGfp,
                                   enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */,
                                   VERR_NO_PHYS_MEMORY);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
     */
    if (PhysHighest != NIL_RTHCPHYS)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (page_to_phys(pMemLnx->apPages[iPage]) > PhysHighest)
            {
                rtR0MemObjLinuxFreePages(pMemLnx);
                rtR0MemObjDelete(&pMemLnx->Core);
                return VERR_NO_MEMORY;
            }
    }

    /*
     * Complete the object.
     */
    if (enmType == RTR0MEMOBJTYPE_PHYS)
    {
        pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
        pMemLnx->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemLnx->Core;
    return rc;
}


/**
 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object pointer on success.
 * @param   enmType     The object type.
 * @param   cb          The size of the allocation.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid for enmType == RTR0MEMOBJTYPE_PHYS, ignored otherwise.
 * @param   PhysHighest See rtR0MemObjNativeAllocPhys.
 */
static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                       size_t cb, size_t uAlignment, RTHCPHYS PhysHighest)
{
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * There are two clear cases and that's the <=16MB and anything-goes ones.
     * When the physical address limit is somewhere in-between those two we'll
     * just have to try, starting with HIGHUSER and working our way through the
     * different types, hoping we'll get lucky.
     *
     * We should probably move this physical address restriction logic up to
     * the page alloc function as it would be more efficient there. But since
     * we don't expect this to be a performance issue just yet it can wait.
     */
    if (PhysHighest == NIL_RTHCPHYS)
        /* ZONE_HIGHMEM: the whole physical memory */
        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
    else if (PhysHighest <= _1M * 16)
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
    else
    {
        rc = VERR_NO_MEMORY;
        if (RT_FAILURE(rc))
            /* ZONE_HIGHMEM: the whole physical memory */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
        if (RT_FAILURE(rc))
            /* ZONE_NORMAL: 0-896MB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_USER);
#ifdef GFP_DMA32
        if (RT_FAILURE(rc))
            /* ZONE_DMA32: 0-4GB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA32);
#endif
        if (RT_FAILURE(rc))
            /* ZONE_DMA: 0-16MB */
            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


/**
 * Translates a kernel virtual address to a linux page structure by walking the
 * page tables.
 *
 * @note    We do assume that the page tables will not change as we are walking
 *          them. This assumption is rather forced by the fact that I could not
 *          immediately see any way of preventing this from happening. So, we
 *          take some extra care when accessing them.
 *
 *          Because of this, we don't want to use this function on memory where
 *          attribute changes to nearby pages are likely to cause large pages to
 *          be used or split up. So, don't use this for the linear mapping of
 *          physical memory.
 *
 * @returns Pointer to the page structure or NULL if it could not be found.
 * @param   pv          The kernel virtual address.
 */
static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
{
    unsigned long   ulAddr = (unsigned long)pv;
    unsigned long   pfn;
    struct page    *pPage;
    pte_t          *pEntry;
    union
    {
        pgd_t       Global;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
        pud_t       Upper;
#endif
        pmd_t       Middle;
        pte_t       Entry;
    } u;

    /* Can this happen in a situation this code will be called in? And if
     * so, can it change under our feet? See also
     * "Documentation/vm/active_mm.txt" in the kernel sources. */
    if (RT_UNLIKELY(!current->active_mm))
        return NULL;
    u.Global = *pgd_offset(current->active_mm, ulAddr);
    if (RT_UNLIKELY(pgd_none(u.Global)))
        return NULL;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
    u.Upper = *pud_offset(&u.Global, ulAddr);
    if (RT_UNLIKELY(pud_none(u.Upper)))
        return NULL;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
    if (pud_large(u.Upper))
    {
        pPage = pud_page(u.Upper);
        AssertReturn(pPage, NULL);
        pfn  = page_to_pfn(pPage);      /* doing the safe way... */
        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PUD_SHIFT - PAGE_SHIFT)) - 1);
        return pfn_to_page(pfn);
    }
# endif

    u.Middle = *pmd_offset(&u.Upper, ulAddr);
#else  /* < 2.6.11 */
    u.Middle = *pmd_offset(&u.Global, ulAddr);
#endif /* < 2.6.11 */
    if (RT_UNLIKELY(pmd_none(u.Middle)))
        return NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    if (pmd_large(u.Middle))
    {
        pPage = pmd_page(u.Middle);
        AssertReturn(pPage, NULL);
        pfn  = page_to_pfn(pPage);      /* doing the safe way... */
        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PMD_SHIFT - PAGE_SHIFT)) - 1);
        return pfn_to_page(pfn);
    }
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map) /* As usual, RHEL 3 had pte_offset_map earlier. */
    pEntry = pte_offset_map(&u.Middle, ulAddr);
#else
    pEntry = pte_offset(&u.Middle, ulAddr);
#endif
    if (RT_UNLIKELY(!pEntry))
        return NULL;
    u.Entry = *pEntry;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map)
    pte_unmap(pEntry);
#endif

    if (RT_UNLIKELY(!pte_present(u.Entry)))
        return NULL;
    return pte_page(u.Entry);
}
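
/* Used by rtR0MemObjNativeLockKernel below for kernel addresses outside the linear
 * mapping (e.g. vmalloc'ed memory), where virt_to_page() cannot be used. */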


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, uAlignment, PhysHighest);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PAGE_SIZE, PhysHighest);
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * All we need to do here is to validate that we can use
     * ioremap on the specified address (32/64-bit dma_addr_t).
     */
    PRTR0MEMOBJLNX  pMemLnx;
    dma_addr_t      PhysAddr = Phys;
    AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
    pMemLnx->Core.u.Phys.fAllocated = false;
    pMemLnx->Core.u.Phys.uCachePolicy = uCachePolicy;
    Assert(!pMemLnx->cPages);
    *ppMem = &pMemLnx->Core;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    IPRT_LINUX_SAVE_EFL_AC();
    const int cPages = cb >> PAGE_SHIFT;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    struct vm_area_struct **papVMAs;
    PRTR0MEMOBJLNX  pMemLnx;
    int             rc      = VERR_NO_MEMORY;
    int const       fWrite  = fAccess & RTMEM_PROT_WRITE ? 1 : 0;

    /*
     * Check for valid task and size overflows.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (((size_t)cPages << PAGE_SHIFT) != cb)
        return VERR_OUT_OF_RANGE;

    /*
     * Allocate the memory object and a temporary buffer for the VMAs.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
    if (papVMAs)
    {
        down_read(&pTask->mm->mmap_sem);

        /*
         * Get user pages.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
        if (R0Process == RTR0ProcHandleSelf())
            rc = get_user_pages(R3Ptr,                  /* Where from. */
                                cPages,                 /* How many pages. */
                                fWrite,                 /* Write to memory. */
                                fWrite,                 /* force write access. */
                                &pMemLnx->apPages[0],   /* Page array. */
                                papVMAs);               /* vmas */
        /*
         * Actually this should not happen at the moment as we call this function
         * only for our own process.
         */
        else
            rc = get_user_pages_remote(
                                pTask,                  /* Task for fault accounting. */
                                pTask->mm,              /* Whose pages. */
                                R3Ptr,                  /* Where from. */
                                cPages,                 /* How many pages. */
                                fWrite,                 /* Write to memory. */
                                fWrite,                 /* force write access. */
                                &pMemLnx->apPages[0],   /* Page array. */
                                papVMAs);               /* vmas */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
        rc = get_user_pages(pTask,                  /* Task for fault accounting. */
                            pTask->mm,              /* Whose pages. */
                            R3Ptr,                  /* Where from. */
                            cPages,                 /* How many pages. */
                            fWrite,                 /* Write to memory. */
                            fWrite,                 /* force write access. */
                            &pMemLnx->apPages[0],   /* Page array. */
                            papVMAs);               /* vmas */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
        if (rc == cPages)
        {
            /*
             * Flush dcache (required?), protect against fork and _really_ pin the page
             * table entries. get_user_pages() will protect against swapping out the
             * pages but it will NOT protect against removing page table entries. This
             * can be achieved with
             *  - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
             *    an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
             *    Usual Linux distributions support only a limited size of locked pages
             *    (e.g. 32KB).
             *  - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages())
             *    or by
             *  - setting the VM_LOCKED flag. This is the same as doing mlock() without
             *    a range check.
             */
            /** @todo The Linux fork() protection will require more work if this API
             * is to be used for anything but locking VM pages. */
            while (rc-- > 0)
            {
                flush_dcache_page(pMemLnx->apPages[rc]);
                papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
            }

            up_read(&pTask->mm->mmap_sem);

            RTMemFree(papVMAs);

            pMemLnx->Core.u.Lock.R0Process = R0Process;
            pMemLnx->cPages = cPages;
            Assert(!pMemLnx->fMappedToRing0);
            *ppMem = &pMemLnx->Core;

            IPRT_LINUX_RESTORE_EFL_AC();
            return VINF_SUCCESS;
        }

        /*
         * Failed - we need to unlock any pages that we succeeded to lock.
         */
        while (rc-- > 0)
        {
            if (!PageReserved(pMemLnx->apPages[rc]))
                SetPageDirty(pMemLnx->apPages[rc]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
            put_page(pMemLnx->apPages[rc]);
#else
            page_cache_release(pMemLnx->apPages[rc]);
#endif
        }

        up_read(&pTask->mm->mmap_sem);

        RTMemFree(papVMAs);
        rc = VERR_LOCK_FAILED;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    IPRT_LINUX_SAVE_EFL_AC();
    void           *pvLast = (uint8_t *)pv + cb - 1;
    size_t const    cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJLNX  pMemLnx;
    bool            fLinearMapping;
    int             rc;
    uint8_t        *pbPage;
    size_t          iPage;
    NOREF(fAccess);

    if (   !RTR0MemKernelIsValidAddr(pv)
        || !RTR0MemKernelIsValidAddr(pv + cb))
        return VERR_INVALID_PARAMETER;

    /*
     * The lower part of the kernel memory has a linear mapping between
     * physical and virtual addresses. So we take a short cut here. This is
     * assumed to be the cleanest way to handle those addresses (and the code
     * is well tested, though the test for determining it is not very nice).
     * If we ever decide it isn't we can still remove it.
     */
#if 0
    fLinearMapping = (unsigned long)pvLast < VMALLOC_START;
#else
    fLinearMapping = (unsigned long)pv     >= (unsigned long)__va(0)
                  && (unsigned long)pvLast <  (unsigned long)high_memory;
#endif

    /*
     * Allocate the memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemLnx)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Gather the pages.
     * We ASSUME all kernel pages are non-swappable and non-movable.
     */
    rc     = VINF_SUCCESS;
    pbPage = (uint8_t *)pvLast;
    iPage  = cPages;
    if (!fLinearMapping)
    {
        while (iPage-- > 0)
        {
            struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
            if (RT_UNLIKELY(!pPage))
            {
                rc = VERR_LOCK_FAILED;
                break;
            }
            pMemLnx->apPages[iPage] = pPage;
            pbPage -= PAGE_SIZE;
        }
    }
    else
    {
        while (iPage-- > 0)
        {
            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
            pbPage -= PAGE_SIZE;
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Complete the memory object and return.
         */
        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        pMemLnx->cPages = cPages;
        Assert(!pMemLnx->fMappedToRing0);
        *ppMem = &pMemLnx->Core;

        IPRT_LINUX_RESTORE_EFL_AC();
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    IPRT_LINUX_SAVE_EFL_AC();
    const size_t cPages = cb >> PAGE_SHIFT;
    struct page *pDummyPage;
    struct page **papPages;

    /* check for unsupported stuff. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate a dummy page and create a page pointer array for vmap such that
     * the dummy page is mapped all over the reserved area.
     */
    pDummyPage = alloc_page(GFP_HIGHUSER | __GFP_NOWARN);
    if (pDummyPage)
    {
        papPages = RTMemAlloc(sizeof(*papPages) * cPages);
        if (papPages)
        {
            void *pv;
            size_t iPage = cPages;
            while (iPage-- > 0)
                papPages[iPage] = pDummyPage;
# ifdef VM_MAP
            pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
# else
            pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
# endif
            RTMemFree(papPages);
            if (pv)
            {
                PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
                if (pMemLnx)
                {
                    pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
                    pMemLnx->cPages = 1;
                    pMemLnx->apPages[0] = pDummyPage;
                    *ppMem = &pMemLnx->Core;
                    IPRT_LINUX_RESTORE_EFL_AC();
                    return VINF_SUCCESS;
                }
                vunmap(pv);
            }
        }
        __free_page(pDummyPage);
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return VERR_NO_MEMORY;

#else /* < 2.4.22 */
    /*
     * Could probably use ioremap here, but the caller is in a better position than us
     * to select some safe physical memory.
     */
    return VERR_NOT_SUPPORTED;
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    IPRT_LINUX_SAVE_EFL_AC();
    PRTR0MEMOBJLNX pMemLnx;
    void *pv;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    if (!pTask)
        return VERR_NOT_SUPPORTED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Let rtR0MemObjLinuxDoMmap do the difficult bits.
     */
    pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
    if (pv == (void *)-1)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemLnx)
    {
        rtR0MemObjLinuxDoMunmap(pv, cb, pTask);
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    pMemLnx->Core.u.ResVirt.R0Process = R0Process;
    *ppMem = &pMemLnx->Core;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap,
                                          void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    int rc = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    PRTR0MEMOBJLNX pMemLnx;
    IPRT_LINUX_SAVE_EFL_AC();

    /* Fail if requested to do something we can't. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        if (pMemLnxToMap->cPages)
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
            /*
             * Use vmap - 2.4.22 and later.
             */
            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
# ifdef VM_MAP
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
# else
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
# endif
            if (pMemLnx->Core.pv)
            {
                pMemLnx->fMappedToRing0 = true;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_MAP_FAILED;

#else  /* < 2.4.22 */
            /*
             * Only option here is to share mappings if possible and forget about fProt.
             */
            if (rtR0MemObjIsRing3(pMemToMap))
                rc = VERR_NOT_SUPPORTED;
            else
            {
                rc = VINF_SUCCESS;
                if (!pMemLnxToMap->Core.pv)
                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
                if (RT_SUCCESS(rc))
                {
                    Assert(pMemLnxToMap->Core.pv);
                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
                }
            }
#endif
        }
        else
        {
            /*
             * MMIO / physical memory.
             */
            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
            if (pMemLnx->Core.pv)
            {
                /** @todo fix protection. */
                rc = VINF_SUCCESS;
            }
        }
        if (RT_SUCCESS(rc))
        {
            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemLnx->Core;
            IPRT_LINUX_RESTORE_EFL_AC();
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


#ifdef VBOX_USE_PAE_HACK
/**
 * Replace the PFN of a PTE with the address of the actual page.
 *
 * The caller maps a reserved dummy page at the address with the desired access
 * and flags.
 *
 * This hack is required for older Linux kernels which don't provide
 * remap_pfn_range().
 *
 * @returns 0 on success, -ENOMEM on failure.
 * @param   mm          The memory context.
 * @param   ulAddr      The mapping address.
 * @param   Phys        The physical address of the page to map.
 */
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
    int rc = -ENOMEM;
    pgd_t *pgd;

    spin_lock(&mm->page_table_lock);

    pgd = pgd_offset(mm, ulAddr);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
    {
        pmd_t *pmd = pmd_offset(pgd, ulAddr);
        if (!pmd_none(*pmd))
        {
            pte_t *ptep = pte_offset_map(pmd, ulAddr);
            if (ptep)
            {
                pte_t pte = *ptep;
                pte.pte_high &= 0xfff00000;
                pte.pte_high |= ((Phys >> 32) & 0x000fffff);
                pte.pte_low  &= 0x00000fff;
                pte.pte_low  |= (Phys & 0xfffff000);
                set_pte(ptep, pte);
                pte_unmap(ptep);
                rc = 0;
            }
        }
    }

    spin_unlock(&mm->page_table_lock);
    return rc;
}
#endif /* VBOX_USE_PAE_HACK */

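/* The PAE hack flow in rtR0MemObjNativeMapUser below: first map the reserved dummy page
 * with remap_page_range() to establish the PTE, then patch in the real (possibly >4GB)
 * physical address with rtR0MemObjLinuxFixPte(). */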

DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    struct task_struct *pTask        = rtR0ProcessToLinuxTask(R0Process);
    PRTR0MEMOBJLNX      pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    int                 rc           = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX      pMemLnx;
#ifdef VBOX_USE_PAE_HACK
    struct page        *pDummyPage;
    RTHCPHYS            DummyPhys;
#endif
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Check for restrictions.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

#ifdef VBOX_USE_PAE_HACK
    /*
     * Allocate a dummy page for use when mapping the memory.
     */
    pDummyPage = alloc_page(GFP_USER | __GFP_NOWARN);
    if (!pDummyPage)
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }
    SetPageReserved(pDummyPage);
    DummyPhys = page_to_phys(pDummyPage);
#endif

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        /*
         * Allocate user space mapping.
         */
        void *pv;
        pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
        if (pv != (void *)-1)
        {
            /*
             * Map page by page into the mmap area.
             * This is generic, paranoid and not very efficient.
             */
            pgprot_t        fPg       = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
            unsigned long   ulAddrCur = (unsigned long)pv;
            const size_t    cPages    = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
            size_t          iPage;

            down_write(&pTask->mm->mmap_sem);

            rc = VINF_SUCCESS;
            if (pMemLnxToMap->cPages)
            {
                for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
                {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
                    RTHCPHYS Phys = page_to_phys(pMemLnxToMap->apPages[iPage]);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                    struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
                    AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
                    /* remap_page_range() limitation on x86 */
                    AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
#endif

#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
                    rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
                    /* These flags help make 100% sure some bad stuff won't happen (swap, core, ++).
                     * See remap_pfn_range() in mm/memory.c */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
                    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
# else
                    vma->vm_flags |= VM_RESERVED;
# endif
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
                    rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
#elif defined(VBOX_USE_PAE_HACK)
                    rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
                    if (!rc)
                        rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                    rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
#else /* 2.4 */
                    rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
#endif
                    if (rc)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                }
            }
            else
            {
                RTHCPHYS Phys;
                if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
                    Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
                else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
                    Phys = pMemLnxToMap->Core.u.Cont.Phys;
                else
                {
                    AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
                    Phys = NIL_RTHCPHYS;
                }
                if (Phys != NIL_RTHCPHYS)
                {
                    for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
                    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                        struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
                        AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
                        /* remap_page_range() limitation on x86 */
                        AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
                        rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
#elif defined(VBOX_USE_PAE_HACK)
                        rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
                        if (!rc)
                            rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
                        rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
#else /* 2.4 */
                        rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
#endif
                        if (rc)
                        {
                            rc = VERR_NO_MEMORY;
                            break;
                        }
                    }
                }
            }

#ifdef CONFIG_NUMA_BALANCING
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
#  ifdef RHEL_RELEASE_CODE
#   if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)
#    define VBOX_NUMA_HACK_OLD
#   endif
#  endif
# endif
            if (RT_SUCCESS(rc))
            {
                /** @todo Ugly hack! But right now we have no other means to
                 *        disable automatic NUMA page balancing. */
# ifdef RT_OS_X86
#  ifdef VBOX_NUMA_HACK_OLD
                pTask->mm->numa_next_reset = jiffies + 0x7fffffffUL;
#  endif
                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffUL;
# else
#  ifdef VBOX_NUMA_HACK_OLD
                pTask->mm->numa_next_reset = jiffies + 0x7fffffffffffffffUL;
#  endif
                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffffffffffUL;
# endif
            }
#endif /* CONFIG_NUMA_BALANCING */

            up_write(&pTask->mm->mmap_sem);

            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_USE_PAE_HACK
                __free_page(pDummyPage);
#endif
                pMemLnx->Core.pv = pv;
                pMemLnx->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemLnx->Core;
                IPRT_LINUX_RESTORE_EFL_AC();
                return VINF_SUCCESS;
            }

            /*
             * Bail out.
             */
            rtR0MemObjLinuxDoMunmap(pv, pMemLnxToMap->Core.cb, pTask);
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }
#ifdef VBOX_USE_PAE_HACK
    __free_page(pDummyPage);
#endif

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;

    if (pMemLnx->cPages)
        return page_to_phys(pMemLnx->apPages[iPage]);

    switch (pMemLnx->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            return pMemLnx->Core.u.Cont.Phys     + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemLnx->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        /* the parent knows */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemLnx->Core.uRel.Child.pParent, iPage);

        /* cPages > 0 */
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_PAGE:
        default:
            AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
            /* fall thru */

        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}