/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member bind
         *
         * @ttm: Pointer to a struct ttm_tt.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
        int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @ttm: Pointer to a struct ttm_tt.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
        int (*unbind) (struct ttm_tt *ttm);

        /**
         * struct ttm_backend_func member destroy
         *
         * @ttm: Pointer to a struct ttm_tt.
         *
         * Destroy the backend. This is called back from ttm_tt_destroy(),
         * so don't call ttm_tt_destroy() from the callback or you will get
         * an infinite loop.
         */
        void (*destroy) (struct ttm_tt *ttm);
};
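
/*
 * A minimal sketch of how a hypothetical "foo" driver might wire up these
 * callbacks; the foo_gart_* helpers and foo_backend_func are illustrative
 * placeholders, not part of the TTM API:
 *
 *	static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		// Program the device GART with ttm->pages at bo_mem->start.
 *		return foo_gart_bind(ttm->pages, ttm->num_pages, bo_mem->start);
 *	}
 *
 *	static int foo_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		foo_gart_unbind(ttm->num_pages);
 *		return 0;
 *	}
 *
 *	static void foo_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		ttm_tt_fini(ttm);	// never ttm_tt_destroy() here, see above
 *		kfree(ttm);
 *	}
 *
 *	static struct ttm_backend_func foo_backend_func = {
 *		.bind = foo_ttm_bind,
 *		.unbind = foo_ttm_unbind,
 *		.destroy = foo_ttm_destroy,
 *	};
 */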

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags describing the pages.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter-gather table, for objects imported through dma-buf.
 * @glob: Pointer to the struct ttm_bo_global for accounting.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct ttm_bo_device *bdev;
        struct ttm_backend_func *func;
        struct page *dummy_read_page;
        struct page **pages;
        uint32_t page_flags;
        unsigned long num_pages;
        struct sg_table *sg; /* for SG objects via dma-buf */
        struct ttm_bo_global *glob;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @cpu_address: The CPU address of the pages.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
        struct ttm_tt ttm;
        void **cpu_address;
        dma_addr_t *dma_address;
        struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED    (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA      (1 << 3) /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
        /**
         * struct ttm_mem_type_manager member init
         *
         * @man: Pointer to a memory type manager.
         * @p_size: Implementation dependent, but typically the size of the
         * range to be managed in pages.
         *
         * Called to initialize a private range manager. The function is
         * expected to initialize the man::priv member.
         * Returns 0 on success, negative error code on failure.
         */
        int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

        /**
         * struct ttm_mem_type_manager member takedown
         *
         * @man: Pointer to a memory type manager.
         *
         * Called to undo the setup done in init. All allocated resources
         * should be freed.
         */
        int (*takedown)(struct ttm_mem_type_manager *man);

        /**
         * struct ttm_mem_type_manager member get_node
         *
         * @man: Pointer to a memory type manager.
         * @bo: Pointer to the buffer object we're allocating space for.
         * @place: Placement details.
         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
         *
         * This function should allocate space in the memory type managed
         * by @man. Placement details, if applicable, are given by @place.
         * If successful, @mem::mm_node should be set to a non-null value,
         * @mem::start should be set to a value identifying the beginning
         * of the range allocated, and the function should return zero.
         * If the memory region can't accommodate the buffer object,
         * @mem::mm_node should be set to NULL, and the function should
         * return 0.
         * If a system error occurred, preventing the request from being
         * fulfilled, the function should return a negative error code.
         *
         * Note that @mem::mm_node will only be dereferenced by
         * struct ttm_mem_type_manager functions and optionally by the driver,
         * which has knowledge of the underlying type.
         *
         * This function may not be called from within atomic context, so
         * an implementation can and must use either a mutex or a spinlock to
         * protect any data structures managing the space.
         */
        int (*get_node)(struct ttm_mem_type_manager *man,
                        struct ttm_buffer_object *bo,
                        const struct ttm_place *place,
                        struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member put_node
         *
         * @man: Pointer to a memory type manager.
         * @mem: Pointer to a struct ttm_mem_reg describing the allocation
         * to free.
         *
         * This function frees memory type resources previously allocated
         * and that are identified by @mem::mm_node and @mem::start. May not
         * be called from within atomic context.
         */
        void (*put_node)(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member debug
         *
         * @man: Pointer to a memory type manager.
         * @prefix: Prefix to be used in printout to identify the caller.
         *
         * This function is called to print out the state of the memory
         * type manager to aid debugging of out-of-memory conditions.
         * It may not be called from within atomic context.
         */
        void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
        struct ttm_bo_device *bdev;

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
        const struct ttm_mem_type_manager_func *func;
        void *priv;
        struct mutex io_reserve_mutex;
        bool use_io_reserve_lru;
        bool io_reserve_fastpath;

        /*
         * Protected by @io_reserve_mutex:
         */

        struct list_head io_reserve_lru;

        /*
         * Protected by the global->lru_lock.
         */

        struct list_head lru;
};
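
/*
 * Most drivers don't implement struct ttm_mem_type_manager_func themselves
 * but point @func at the stock drm_mm-backed range manager,
 * ttm_bo_manager_func, declared at the bottom of this file. A hedged
 * sketch of an init_mem_type() callback for a hypothetical device (the
 * FOO_GART_BASE name is a placeholder):
 *
 *	static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 *				     struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_SYSTEM:
 *			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_MASK_CACHING;
 *			man->default_caching = TTM_PL_FLAG_CACHED;
 *			break;
 *		case TTM_PL_TT:
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->gpu_offset = FOO_GART_BASE;
 *			man->available_caching = TTM_PL_MASK_CACHING;
 *			man->default_caching = TTM_PL_FLAG_CACHED;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */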

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backed by system
 * memory pages.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
        /**
         * ttm_tt_create
         *
         * @bdev: Pointer to a struct ttm_bo_device.
         * @size: Size of the data needed backing.
         * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
         * @dummy_read_page: See struct ttm_bo_device.
         *
         * Create a struct ttm_tt to back data with system memory pages.
         * No pages are actually allocated.
         * Returns:
         * NULL: Out of memory.
         */
        struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
                                        unsigned long size,
                                        uint32_t page_flags,
                                        struct page *dummy_read_page);

        /**
         * ttm_tt_populate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Allocate all backing pages.
         * Returns:
         * -ENOMEM: Out of memory.
         */
        int (*ttm_tt_populate)(struct ttm_tt *ttm);

        /**
         * ttm_tt_unpopulate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Free all backing pages.
         */
        void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        void (*evict_flags) (struct ttm_buffer_object *bo,
                             struct ttm_placement *placement);
        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait_gpu: whether this should give up and return -EBUSY
         * if this move would require waiting for the GPU
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
                     bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access) (struct ttm_buffer_object *bo,
                              struct file *filp);

        /* Hook to notify the driver about a buffer move, so it can do
         * tiling things. */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *new_mem);
        /* Notify the driver that we're about to take a fault on this bo
         * and have reserved it. */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);

        /**
         * Driver callback for when io memory is mapped (for bo_move_memcpy,
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is no longer in use. io_mem_reserve & io_mem_free
         * calls are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
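
/*
 * Taken together, a driver usually provides one static instance of this
 * structure. A hedged, minimal example for a hypothetical device; the
 * foo_* callbacks are placeholders, while ttm_pool_populate() and
 * ttm_pool_unpopulate() are the stock page-pool helpers from
 * ttm_page_alloc.h:
 *
 *	static struct ttm_bo_driver foo_bo_driver = {
 *		.ttm_tt_create = foo_ttm_tt_create,
 *		.ttm_tt_populate = ttm_pool_populate,
 *		.ttm_tt_unpopulate = ttm_pool_unpopulate,
 *		.invalidate_caches = foo_invalidate_caches,
 *		.init_mem_type = foo_init_mem_type,
 *		.evict_flags = foo_evict_flags,
 *		.move = NULL,	// fall back to ttm_bo_move_memcpy()
 *		.verify_access = foo_verify_access,
 *	};
 */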

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
        struct drm_global_reference ref;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;

        /**
         * Protected by device_list_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru;

        /**
         * Internal protection.
         */
        atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING 0       /* Buffer object is moving and needs
                                           idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

        /*
         * Protected by internal locks.
         */
        struct drm_vma_offset_manager vma_manager;

        /*
         * Protected by the global->lru_lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
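
/*
 * For example, to switch only the caching bits of a placement to
 * write-combined while leaving all other flags intact (a sketch using the
 * masks from ttm_placement.h):
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	// placement == TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 */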

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: Pointer to a struct ttm_bo_device.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                       unsigned long size, uint32_t page_flags,
                       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                           unsigned long size, uint32_t page_flags,
                           struct page *dummy_read_page);
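
/*
 * A driver's ttm_tt_create() callback typically embeds the ttm_tt in its
 * own container and hands it to ttm_tt_init(). A hedged sketch for a
 * hypothetical driver (struct foo_ttm_tt and foo_backend_func are
 * illustrative, not TTM API):
 *
 *	struct foo_ttm_tt {
 *		struct ttm_tt ttm;
 *		// driver-private bind state goes here
 *	};
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
 *						unsigned long size,
 *						uint32_t page_flags,
 *						struct page *dummy_read_page)
 *	{
 *		struct foo_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->ttm.func = &foo_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */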

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free the memory of a ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt whose backing pages will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel
 * mappings of the pages backing @ttm. If changing from cached to uncached
 * or write-combined, all CPU caches will first be flushed to make sure
 * the data of the pages hit RAM. This function may be very costly as it
 * involves global TLB and cache flushes and potential page splitting /
 * combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptibly when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible,
                            bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
                              struct address_space *mapping,
                              uint64_t file_page_offset, bool need_dma32);
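
/*
 * Typical bring-up order in a driver, sketched under the assumption that
 * foo_bo_driver and foo->bo_global_ref were set up as in the earlier
 * examples, that ddev is the driver's drm_device (whose anon_inode
 * provides the backing address_space), and that FOO_FILE_PAGE_OFFSET is a
 * driver-chosen offset:
 *
 *	ret = ttm_bo_device_init(&foo->bdev,
 *				 foo->bo_global_ref.ref.object,
 *				 &foo_bo_driver,
 *				 ddev->anon_inode->i_mapping,
 *				 FOO_FILE_PAGE_OFFSET,
 *				 foo->need_dma32);
 */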

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
                           bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only when reserving with a @ticket).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
                                   bool interruptible, bool no_wait,
                                   struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        if (no_wait) {
                bool success;

                if (WARN_ON(ticket))
                        return -EBUSY;

                success = ww_mutex_trylock(&bo->resv->lock);
                return success ? 0 : -EBUSY;
        }

        if (interruptible)
                ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
        else
                ret = ww_mutex_lock(&bo->resv->lock, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either deliberately or as a result of a buffer being
 * evicted to make room for a buffer already reserved. (Buffers are reserved
 * before they are evicted). The following algorithm prevents such deadlocks
 * from occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit validation
 * sequence number and call this function with a non-NULL @ticket whose
 * stamp is set to that sequence number. If, upon call of this function,
 * the buffer object is already reserved, the validation sequence is checked
 * against the validation sequence of the process currently reserving the
 * buffer, and if the current validation sequence is greater than that of
 * the process holding the reservation, the function returns -EDEADLK.
 * Otherwise it sleeps waiting for the buffer to become unreserved, after
 * which it retries reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only when reserving with a @ticket).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
                                 bool interruptible, bool no_wait,
                                 struct ww_acquire_ctx *ticket)
{
        int ret;

        WARN_ON(!atomic_read(&bo->kref.refcount));

        ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);

        return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        WARN_ON(!atomic_read(&bo->kref.refcount));

        if (interruptible)
                ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
                                                       ticket);
        else
                ww_mutex_lock_slow(&bo->resv->lock, ticket);

        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
        else if (ret == -EINTR)
                ret = -ERESTARTSYS;

        return ret;
}
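
/*
 * The -EDEADLK backoff described above usually takes the shape below when
 * reserving a list of buffers (a sketch; undo_reservations() is a
 * hypothetical helper that releases everything reserved so far, and full
 * error handling is omitted). ttm_eu_reserve_buffers() in
 * ttm_execbuf_util.c implements this pattern for real:
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	list_for_each_entry(entry, &list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, &ticket);
 *		if (ret == -EDEADLK) {
 *			// Back off, then sleep on the contended buffer
 *			// and restart the loop from scratch.
 *			undo_reservations(&list, entry, &ticket);
 *			ret = ttm_bo_reserve_slowpath(entry->bo, true,
 *						      &ticket);
 *		}
 *	}
 *	ww_acquire_done(&ticket);
 */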

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&bo->glob->lru_lock);
        }
        __ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @t: ww_acquire_ctx used for reserving (unused).
 *
 * Unreserve a previous reservation of @bo made with @t.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
                                           struct ww_acquire_ctx *t)
{
        ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                              bool evict, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem);
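
/*
 * A driver's move() callback commonly tries an accelerated blit first and
 * falls back to the helpers above; a hedged sketch (foo_move_blit() is a
 * hypothetical hardware copy that returns 0 on success):
 *
 *	static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			       bool interruptible, bool no_wait_gpu,
 *			       struct ttm_mem_reg *new_mem)
 *	{
 *		if (foo_move_blit(bo, evict, no_wait_gpu, new_mem) == 0)
 *			return 0;
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */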

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the fence on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     struct fence *fence,
                                     bool evict, bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
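
/*
 * For instance, when building a kernel mapping of a buffer, the caching
 * bits of the current placement select the final page protection (a
 * sketch of how ttm_bo_util.c uses this helper, where mem is the buffer's
 * current ttm_mem_reg):
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 */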

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                                        struct agp_bridge_data *bridge,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif