/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map(), unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting.
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
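
/*
 * A minimal sketch of the pattern the helper above is meant for: a driver's
 * customized fault() handler reserving the object, letting TTM insert the
 * page table entries and then dropping the reservation. "mydrv" is a
 * hypothetical driver name, not part of TTM:
 *
 *	static vm_fault_t mydrv_bo_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		(driver-specific work requiring the reservation goes here)
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       TTM_BO_VM_NUM_PREFAULT, 1);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */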
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
 * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
 * @vmf: Fault data
 * @bo: The buffer object
 * @page_offset: Page offset from bo start
 * @fault_page_size: The size of the fault in pages.
 * @pgprot: The page protections.
 * Does additional checking whether it's possible to insert a PUD or PMD
 * pfn and performs the insertion.
 *
 * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
 * a huge fault was not possible, or on insertion error.
 */
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	pgoff_t i;
	vm_fault_t ret;
	unsigned long pfn;
	pfn_t pfnt;
	struct ttm_tt *ttm = bo->ttm;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/* Fault should not cross bo boundary. */
	page_offset &= ~(fault_page_size - 1);
	if (page_offset + fault_page_size > bo->num_pages)
		goto out_fallback;

	if (bo->mem.bus.is_iomem)
		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
	else
		pfn = page_to_pfn(ttm->pages[page_offset]);

	/* pfn must be fault_page_size aligned. */
	if ((pfn & (fault_page_size - 1)) != 0)
		goto out_fallback;

	/* Check that memory is contiguous. */
	if (!bo->mem.bus.is_iomem) {
		for (i = 1; i < fault_page_size; ++i) {
			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
				goto out_fallback;
		}
	} else if (bo->bdev->driver->io_mem_pfn) {
		for (i = 1; i < fault_page_size; ++i) {
			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
				goto out_fallback;
		}
	}

	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
#endif
	else
		/* Unexpected fault size: warn and fall back (assignment is intentional). */
		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);

	if (ret != VM_FAULT_NOPAGE)
		goto out_fallback;

	return VM_FAULT_NOPAGE;
out_fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	return VM_FAULT_FALLBACK;
}
#endif
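
/*
 * A hedged sketch of how a huge fault typically reaches the helper above: a
 * driver's huge_fault() handler (structured like the fault() sketch earlier
 * in this file, including ttm_bo_vm_reserve() and the final dma_resv_unlock())
 * translates the requested page table entry size into a fault_page_size and
 * hands it to ttm_bo_vm_fault_reserved() as the last argument:
 *
 *	pgoff_t fault_page_size;
 *
 *	switch (pe_size) {
 *	case PE_SIZE_PMD:
 *		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
 *		break;
 *	case PE_SIZE_PUD:
 *		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
 *		break;
 *	default:
 *		return VM_FAULT_FALLBACK;
 *	}
 *
 *	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *				       TTM_BO_VM_NUM_PREFAULT, fault_page_size);
 */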
/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvice settings and the size of the GPU object
 * backed by the memory.
 * @fault_page_size: The size of the fault in pages.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault,
				    pgoff_t fault_page_size)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			dma_fence_put(moving);
			return VM_FAULT_NOPAGE;
		default:
			dma_fence_put(moving);
			return VM_FAULT_SIGBUS;
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/* We don't prefault on huge faults. Yet. */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
					    fault_page_size, prot);
		goto out_io_unlock;
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->mem.bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->base.vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_mixed_prot() for a discussion.
		 */
		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed_prot(vma, address,
						    __pfn_to_pfn_t(pfn, PFN_DEV),
						    prot);
		else
			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				goto out_io_unlock;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
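
/*
 * A hedged sketch of how a caller might scale @num_prefault to the expected
 * access pattern, as suggested by the parameter description above; the
 * VM_RAND_READ heuristic is an assumption, not something TTM mandates:
 *
 *	pgoff_t num_prefault = (vmf->vma->vm_flags & VM_RAND_READ) ?
 *				1 : TTM_BO_VM_NUM_PREFAULT;
 *
 *	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *				       num_prefault, 1);
 */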
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);
void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				return ret;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
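
/*
 * A minimal sketch (hypothetical "mydrv" names, not part of TTM) of wiring
 * ttm_bo_mmap() into a driver's file_operations::mmap callback, assuming the
 * driver embeds its struct ttm_bo_device in a device-private structure:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = mydrv_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */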
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);