// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
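
/*
 * Example: a minimal sketch of how a driver might wire these helpers up
 * (the driver structure and fops names here are hypothetical):
 *
 *     DEFINE_DRM_GEM_SHMEM_FOPS(foo_fops);
 *
 *     static struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM,
 *             .fops            = &foo_fops,
 *             .dumb_create     = drm_gem_shmem_dumb_create,
 *             ...
 *     };
 */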

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
        .free = drm_gem_shmem_free_object,
        .print_info = drm_gem_shmem_print_info,
        .pin = drm_gem_shmem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        .vunmap = drm_gem_shmem_vunmap,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
        struct drm_gem_shmem_object *shmem;
        struct drm_gem_object *obj;
        int ret;

        size = PAGE_ALIGN(size);

        if (dev->driver->gem_create_object)
                obj = dev->driver->gem_create_object(dev, size);
        else
                obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        if (!obj->funcs)
                obj->funcs = &drm_gem_shmem_funcs;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto err_free;

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto err_release;

        shmem = to_drm_gem_shmem_obj(obj);
        mutex_init(&shmem->pages_lock);
        mutex_init(&shmem->vmap_lock);

        /*
         * Our buffers are kept pinned, so allocating them
         * from the MOVABLE zone is a really bad idea, and
         * conflicts with CMA. See comments above new_inode()
         * why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

        return shmem;

err_release:
        drm_gem_object_release(obj);
err_free:
        kfree(obj);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
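
/*
 * Example: allocating an object directly (a sketch; most drivers instead go
 * through drm_gem_shmem_create_with_handle() or drm_gem_shmem_dumb_create()
 * below, which also register a userspace handle):
 *
 *     struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
 *
 *     if (IS_ERR(shmem))
 *             return PTR_ERR(shmem);
 *     ...
 *     drm_gem_object_put_unlocked(&shmem->base);
 */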

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        WARN_ON(shmem->vmap_use_count);

        if (obj->import_attach) {
                shmem->pages_use_count--;
                drm_prime_gem_destroy(obj, shmem->sgt);
                kvfree(shmem->pages);
        } else {
                if (shmem->sgt) {
                        dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
                                     shmem->sgt->nents, DMA_BIDIRECTIONAL);

                        drm_gem_shmem_put_pages(shmem);
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
        }

        WARN_ON(shmem->pages_use_count);

        drm_gem_object_release(obj);
        mutex_destroy(&shmem->pages_lock);
        mutex_destroy(&shmem->vmap_lock);
        kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;
        struct page **pages;

        if (shmem->pages_use_count++ > 0)
                return 0;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages)) {
                DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
                shmem->pages_use_count = 0;
                return PTR_ERR(pages);
        }

        shmem->pages = pages;

        return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
        int ret;

        ret = mutex_lock_interruptible(&shmem->pages_lock);
        if (ret)
                return ret;
        ret = drm_gem_shmem_get_pages_locked(shmem);
        mutex_unlock(&shmem->pages_lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;

        if (WARN_ON_ONCE(!shmem->pages_use_count))
                return;

        if (--shmem->pages_use_count > 0)
                return;

        drm_gem_put_pages(obj, shmem->pages,
                          shmem->pages_mark_dirty_on_put,
                          shmem->pages_mark_accessed_on_put);
        shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
        mutex_lock(&shmem->pages_lock);
        drm_gem_shmem_put_pages_locked(shmem);
        mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
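
/*
 * Example: get_pages()/put_pages() calls must be balanced; a sketch of
 * direct page access in a driver:
 *
 *     ret = drm_gem_shmem_get_pages(shmem);
 *     if (ret)
 *             return ret;
 *     ...access shmem->pages[i]...
 *     drm_gem_shmem_put_pages(shmem);
 */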

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;
        int ret;

        if (shmem->vmap_use_count++ > 0)
                return shmem->vaddr;

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                goto err_zero_use;

        if (obj->import_attach)
                shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
        else
                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));

        if (!shmem->vaddr) {
                DRM_DEBUG_KMS("Failed to vmap pages\n");
                ret = -ENOMEM;
                goto err_put_pages;
        }

        return shmem->vaddr;

err_put_pages:
        drm_gem_shmem_put_pages(shmem);
err_zero_use:
        shmem->vmap_use_count = 0;

        return ERR_PTR(ret);
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address of the mapping or an ERR_PTR()-encoded negative
 * error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        void *vaddr;
        int ret;

        ret = mutex_lock_interruptible(&shmem->vmap_lock);
        if (ret)
                return ERR_PTR(ret);
        vaddr = drm_gem_shmem_vmap_locked(shmem);
        mutex_unlock(&shmem->vmap_lock);

        return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
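
/*
 * Example: temporary CPU access via vmap/vunmap (a sketch; note the helper
 * follows the ERR_PTR() convention and does not return NULL on failure):
 *
 *     void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *     if (IS_ERR(vaddr))
 *             return PTR_ERR(vaddr);
 *     memcpy(vaddr, src, len);
 *     drm_gem_shmem_vunmap(obj, vaddr);
 */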

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;

        if (WARN_ON_ONCE(!shmem->vmap_use_count))
                return;

        if (--shmem->vmap_use_count > 0)
                return;

        if (obj->import_attach)
                dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
        else
                vunmap(shmem->vaddr);

        shmem->vaddr = NULL;
        drm_gem_shmem_put_pages(shmem);
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping (unused)
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        mutex_lock(&shmem->vmap_lock);
        drm_gem_shmem_vunmap_locked(shmem);
        mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

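/**
 * drm_gem_shmem_create_with_handle - Create a shmem GEM object and return a handle to it
 * @file_priv: DRM file structure to register the handle for
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the handle
 *
 * This function creates a shmem GEM object, registers a handle for it in
 * @file_priv and then drops the reference obtained from the allocation, so
 * the handle is the only reference left when this function returns.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */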
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
                                 struct drm_device *dev, size_t size,
                                 uint32_t *handle)
{
        struct drm_gem_shmem_object *shmem;
        int ret;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return shmem;

        /*
         * Allocate an id in the idr table where the obj is registered;
         * the handle holds the id that userspace can see.
         */
        ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put_unlocked(&shmem->base);
        if (ret)
                return ERR_PTR(ret);

        return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
                              struct drm_mode_create_dumb *args)
{
        u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_shmem_object *shmem;

        if (!args->pitch || !args->size) {
                args->pitch = min_pitch;
                args->size = args->pitch * args->height;
        } else {
                /* ensure sane minimum values */
                if (args->pitch < min_pitch)
                        args->pitch = min_pitch;
                if (args->size < args->pitch * args->height)
                        args->size = args->pitch * args->height;
        }

        shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

        return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
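
/*
 * Worked example: for a 640x480 XRGB8888 dumb buffer (bpp = 32), the minimum
 * pitch above is DIV_ROUND_UP(640 * 32, 8) = 2560 bytes, so the default size
 * is 2560 * 480 = 1228800 bytes (rounded up to whole pages by
 * drm_gem_shmem_create()).
 */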

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        loff_t num_pages = obj->size >> PAGE_SHIFT;
        struct page *page;

        if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
                return VM_FAULT_SIGBUS;

        page = shmem->pages[vmf->pgoff];

        return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        int ret;

        ret = drm_gem_shmem_get_pages(shmem);
        WARN_ON_ONCE(ret != 0);

        drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_gem_shmem_put_pages(shmem);
        drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
        .fault = drm_gem_shmem_fault,
        .open = drm_gem_shmem_vm_open,
        .close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @filp: File object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &file_operations.mmap handler.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_shmem_object *shmem;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        shmem = to_drm_gem_shmem_obj(vma->vm_private_data);

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret) {
                drm_gem_vm_close(vma);
                return ret;
        }

        /* VM_PFNMAP was set by drm_gem_mmap() */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        /* Remove the fake offset */
        vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
                              const struct drm_gem_object *obj)
{
        const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
        drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
        drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table of the pinned pages, suitable
 * for PRIME usage. The table is not dma-mapped; see
 * drm_gem_shmem_get_pages_sgt() below for that.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
        int ret;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        struct sg_table *sgt;

        if (shmem->sgt)
                return shmem->sgt;

        WARN_ON(obj->import_attach);

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                return ERR_PTR(ret);

        sgt = drm_gem_shmem_get_sg_table(&shmem->base);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
        dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

        shmem->sgt = sgt;

        return sgt;

err_put_pages:
        drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
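
/*
 * Example: fetching the dma-mapped table before programming the hardware
 * (a sketch; the table is cached in the object and released in
 * drm_gem_shmem_free_object()):
 *
 *     struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *
 *     if (IS_ERR(sgt))
 *             return PTR_ERR(sgt);
 *     ...program the hardware from sgt->sgl...
 */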

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
                                    struct dma_buf_attachment *attach,
                                    struct sg_table *sgt)
{
        size_t size = PAGE_ALIGN(attach->dmabuf->size);
        size_t npages = size >> PAGE_SHIFT;
        struct drm_gem_shmem_object *shmem;
        int ret;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);

        shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!shmem->pages) {
                ret = -ENOMEM;
                goto err_free_gem;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
        if (ret < 0)
                goto err_free_array;

        shmem->sgt = sgt;
        shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

        DRM_DEBUG_PRIME("size = %zu\n", size);

        return &shmem->base;

err_free_array:
        kvfree(shmem->pages);
err_free_gem:
        drm_gem_object_put_unlocked(&shmem->base);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
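
/*
 * Example: enabling PRIME import in a driver (a sketch; the structure name
 * is hypothetical):
 *
 *     static struct drm_driver foo_driver = {
 *             ...
 *             .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *     };
 */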