/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

/* Parameter bundle consumed by amdgpu_bo_create() */
struct amdgpu_bo_param {
	unsigned long size;
	int byte_align;
	u32 domain;
	u64 flags;
	enum ttm_bo_type type;
	struct reservation_object *resv;
};

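/*
 * Illustrative sketch (not part of the header): a typical caller fills an
 * amdgpu_bo_param on the stack and hands it to amdgpu_bo_create(), declared
 * further down in this file.  The concrete values (a 4 KiB, CPU-accessible
 * VRAM BO) are made up for the example.
 *
 *	struct amdgpu_bo_param bp = {
 *		.size		= 4096,
 *		.byte_align	= PAGE_SIZE,
 *		.domain		= AMDGPU_GEM_DOMAIN_VRAM,
 *		.flags		= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 *		.type		= ttm_bo_type_kernel,
 *		.resv		= NULL,
 *	};
 *	struct amdgpu_bo *bo = NULL;
 *	int r = amdgpu_bo_create(adev, &bp, &bo);
 *
 *	if (r)
 *		return r;
 */
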
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va *bo_va;
	struct list_head list;
	struct rb_node rb;
	uint64_t start;
	uint64_t last;
	uint64_t __subtree_last;
	uint64_t offset;
	uint64_t flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base base;

	/* protected by bo being reserved */
	unsigned ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence *last_pt_update;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* whether the mappings have been cleared or filled */
	bool cleared;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32 preferred_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	unsigned prime_shared_count;
	/* list of all virtual addresses this bo is associated with */
	struct list_head va;
	/* Constant after initialization */
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;
	struct amdgpu_bo *shadow;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;

	union {
		struct list_head mn_list;
		struct list_head shadow_list;
	};

	struct kgd_mem *kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns the AMDGPU_GEM_DOMAIN_* flag corresponding to @mem_type, or 0 for
 * an unknown type.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

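/*
 * Illustrative sketch (assumption, not part of the header): most accessors in
 * this file expect the BO to be reserved first, so callers typically follow a
 * reserve/operate/unreserve pattern like the one below.
 *
 *	int r = amdgpu_bo_reserve(bo, false);
 *
 *	if (unlikely(r != 0))
 *		return r;	// -ERESTARTSYS must be propagated to user space
 *	// ... touch state protected by the reservation, e.g. pin or kmap ...
 *	amdgpu_bo_unreserve(bo);
 */
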
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	/* Walk the BO's drm_mm nodes; any node starting below the
	 * CPU-visible page-frame limit means part of the BO is visible.
	 */
	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
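/*
 * Illustrative sketch (assumption, not part of the header): driver-internal
 * buffers are usually allocated pinned and mapped in one step with
 * amdgpu_bo_create_kernel() and released with amdgpu_bo_free_kernel().
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	// ... use cpu_ptr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */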
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
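/*
 * Illustrative sketch (assumption, not part of the header): CPU access to a
 * BO's backing memory goes through amdgpu_bo_kmap()/amdgpu_bo_kunmap(), called
 * with the reservation held.
 *
 *	void *ptr;
 *	int r = amdgpu_bo_kmap(bo, &ptr);
 *
 *	if (r)
 *		return r;
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */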
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
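/*
 * Illustrative sketch (assumption, not part of the header): pinning fixes the
 * BO in a placement domain and returns its GPU address; both calls expect the
 * BO to be reserved by the caller.
 *
 *	u64 gpu_addr;
 *	int r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *
 *	if (r)
 *		return r;
 *	// ... program gpu_addr into hardware; keep the BO pinned while used ...
 *	amdgpu_bo_unpin(bo);
 */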
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
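/*
 * Illustrative sketch (assumption, not part of the header): the sub-allocator
 * carves small ranges out of one backing BO owned by an amdgpu_sa_manager
 * (here "mgr", assumed to be already initialized by the caller); a range is
 * handed back with amdgpu_sa_bo_free() together with the fence that last
 * used it.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r = amdgpu_sa_bo_new(mgr, &sa_bo, 256, 16);
 *
 *	if (r)
 *		return r;
 *	// ... emit commands at amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */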
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif


#endif