/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};

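/*
 * Example (illustrative sketch only, not driver code): one typical way to
 * fill in an amdgpu_bo_param before handing it to amdgpu_bo_create().
 * The concrete values (a 4 KiB, page-aligned kernel BO in GTT) and the
 * in-scope "adev" pointer are assumptions chosen for the example:
 *
 *	struct amdgpu_bo_param bp = {};
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.preferred_domain = bp.domain;
 *	bp.flags = 0;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;	// allocate a private reservation object
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */
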
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns the corresponding domain of the ttm mem_type, or 0 if the
 * mem_type has no corresponding domain.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

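/*
 * Example (illustrative sketch only): the usual reserve/work/unreserve
 * pattern around state that is protected by the BO being reserved:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))	// e.g. -ERESTARTSYS on a pending signal
 *		return r;
 *
 *	... touch state that requires reservation, e.g. pin or kmap ...
 *
 *	amdgpu_bo_unreserve(bo);
 */
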
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

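/*
 * Worked example (assuming 4 KiB CPU pages and an AMDGPU_GPU_PAGE_SIZE of
 * 4096, the usual configuration): a BO backed by 8 CPU pages has
 * amdgpu_bo_size() == 8 << 12 == 32768 bytes, which corresponds to
 * amdgpu_bo_ngpu_pages() == 32768 / 4096 == 8 GPU pages.
 */
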
/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

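/*
 * Illustrative note: the value returned by amdgpu_bo_mmap_offset() is the
 * fake offset that user space passes to mmap(2) on the DRM file descriptor
 * to map the BO, roughly (user-space sketch, variable names assumed):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, mmap_offset);
 */
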
/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: the buffer object to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: the buffer object to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

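/*
 * Example (illustrative sketch only): both flag helpers are typically used
 * as simple predicates; the surrounding logic here is an assumption for
 * the example, not driver code:
 *
 *	if (!amdgpu_bo_explicit_sync(bo))
 *		... add implicit fences from the reservation object ...
 */
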
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

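/*
 * Example (illustrative sketch only): a typical sub-allocator lifecycle.
 * The manager location, sizes, alignments and domain are assumptions
 * chosen for the example:
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &sa_manager,
 *				      64 * 1024, 256, AMDGPU_GEM_DOMAIN_GTT);
 *	...
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 1024, 256);
 *	... use amdgpu_sa_bo_gpu_addr(sa_bo) / amdgpu_sa_bo_cpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	...
 *	amdgpu_sa_bo_manager_fini(adev, &sa_manager);
 */
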
bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif