/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 */
31 | ||
#ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__

#include <drm/drm.h>
37 | #define DRM_AMDGPU_GEM_CREATE 0x00 | |
38 | #define DRM_AMDGPU_GEM_MMAP 0x01 | |
39 | #define DRM_AMDGPU_CTX 0x02 | |
40 | #define DRM_AMDGPU_BO_LIST 0x03 | |
41 | #define DRM_AMDGPU_CS 0x04 | |
42 | #define DRM_AMDGPU_INFO 0x05 | |
43 | #define DRM_AMDGPU_GEM_METADATA 0x06 | |
44 | #define DRM_AMDGPU_GEM_WAIT_IDLE 0x07 | |
45 | #define DRM_AMDGPU_GEM_VA 0x08 | |
46 | #define DRM_AMDGPU_WAIT_CS 0x09 | |
47 | #define DRM_AMDGPU_GEM_OP 0x10 | |
48 | #define DRM_AMDGPU_GEM_USERPTR 0x11 | |
49 | ||
50 | #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) | |
51 | #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) | |
52 | #define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx) | |
53 | #define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list) | |
54 | #define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs) | |
55 | #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info) | |
56 | #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata) | |
57 | #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle) | |
58 | #define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va) | |
59 | #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) | |
60 | #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) | |
61 | #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) | |
62 | ||
63 | #define AMDGPU_GEM_DOMAIN_CPU 0x1 | |
64 | #define AMDGPU_GEM_DOMAIN_GTT 0x2 | |
65 | #define AMDGPU_GEM_DOMAIN_VRAM 0x4 | |
66 | #define AMDGPU_GEM_DOMAIN_GDS 0x8 | |
67 | #define AMDGPU_GEM_DOMAIN_GWS 0x10 | |
68 | #define AMDGPU_GEM_DOMAIN_OA 0x20 | |
69 | ||
70 | #define AMDGPU_GEM_DOMAIN_MASK 0x3F | |
71 | ||
72 | /* Flag that CPU access will be required for the case of VRAM domain */ | |
73 | #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) | |
74 | /* Flag that CPU access will not work, this VRAM domain is invisible */ | |
75 | #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) | |
81629cba | 76 | /* Flag that USWC attributes should be used for GTT */ |
88671288 | 77 | #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) |
81629cba AD |
78 | |
79 | /* Flag mask for GTT domain_flags */ | |
80 | #define AMDGPU_GEM_CREATE_CPU_GTT_MASK \ | |
88671288 | 81 | (AMDGPU_GEM_CREATE_CPU_GTT_USWC | \ |
81629cba AD |
82 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \ |
83 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | |
84 | ||
85 | struct drm_amdgpu_gem_create_in { | |
86 | /** the requested memory size */ | |
87 | uint64_t bo_size; | |
88 | /** physical start_addr alignment in bytes for some HW requirements */ | |
89 | uint64_t alignment; | |
90 | /** the requested memory domains */ | |
91 | uint64_t domains; | |
92 | /** allocation flags */ | |
93 | uint64_t domain_flags; | |
94 | }; | |
95 | ||
96 | struct drm_amdgpu_gem_create_out { | |
97 | /** returned GEM object handle */ | |
98 | uint32_t handle; | |
99 | uint32_t _pad; | |
100 | }; | |
101 | ||
102 | union drm_amdgpu_gem_create { | |
103 | struct drm_amdgpu_gem_create_in in; | |
104 | struct drm_amdgpu_gem_create_out out; | |
105 | }; | |
106 | ||
107 | /** Opcode to create new residency list. */ | |
108 | #define AMDGPU_BO_LIST_OP_CREATE 0 | |
109 | /** Opcode to destroy previously created residency list */ | |
110 | #define AMDGPU_BO_LIST_OP_DESTROY 1 | |
111 | /** Opcode to update resource information in the list */ | |
112 | #define AMDGPU_BO_LIST_OP_UPDATE 2 | |
113 | ||
114 | struct drm_amdgpu_bo_list_in { | |
115 | /** Type of operation */ | |
116 | uint32_t operation; | |
117 | /** Handle of list or 0 if we want to create one */ | |
118 | uint32_t list_handle; | |
119 | /** Number of BOs in list */ | |
120 | uint32_t bo_number; | |
121 | /** Size of each element describing BO */ | |
122 | uint32_t bo_info_size; | |
123 | /** Pointer to array describing BOs */ | |
124 | uint64_t bo_info_ptr; | |
125 | }; | |
126 | ||
127 | struct drm_amdgpu_bo_list_entry { | |
128 | /** Handle of BO */ | |
129 | uint32_t bo_handle; | |
130 | /** New (if specified) BO priority to be used during migration */ | |
131 | uint32_t bo_priority; | |
132 | }; | |
133 | ||
134 | struct drm_amdgpu_bo_list_out { | |
135 | /** Handle of resource list */ | |
136 | uint32_t list_handle; | |
137 | uint32_t _pad; | |
138 | }; | |
139 | ||
140 | union drm_amdgpu_bo_list { | |
141 | struct drm_amdgpu_bo_list_in in; | |
142 | struct drm_amdgpu_bo_list_out out; | |
143 | }; | |
144 | ||
145 | /* context related */ | |
146 | #define AMDGPU_CTX_OP_ALLOC_CTX 1 | |
147 | #define AMDGPU_CTX_OP_FREE_CTX 2 | |
148 | #define AMDGPU_CTX_OP_QUERY_STATE 3 | |
149 | ||
150 | #define AMDGPU_CTX_OP_STATE_RUNNING 1 | |
151 | ||
d94aed5a MO |
152 | /* GPU reset status */ |
153 | #define AMDGPU_CTX_NO_RESET 0 | |
154 | #define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ | |
155 | #define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */ | |
156 | #define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */ | |
157 | ||
81629cba AD |
158 | struct drm_amdgpu_ctx_in { |
159 | uint32_t op; | |
160 | uint32_t flags; | |
161 | uint32_t ctx_id; | |
162 | uint32_t _pad; | |
163 | }; | |
164 | ||
165 | union drm_amdgpu_ctx_out { | |
166 | struct { | |
167 | uint32_t ctx_id; | |
168 | uint32_t _pad; | |
169 | } alloc; | |
170 | ||
171 | struct { | |
172 | uint64_t flags; | |
d94aed5a MO |
173 | /** Number of resets caused by this context so far. */ |
174 | uint32_t hangs; | |
175 | /** Reset status since the last call of the ioctl. */ | |
176 | uint32_t reset_status; | |
81629cba AD |
177 | } state; |
178 | }; | |
179 | ||
180 | union drm_amdgpu_ctx { | |
181 | struct drm_amdgpu_ctx_in in; | |
182 | union drm_amdgpu_ctx_out out; | |
183 | }; | |
184 | ||
185 | /* | |
186 | * This is not a reliable API and you should expect it to fail for any | |
187 | * number of reasons and have fallback path that do not use userptr to | |
188 | * perform any operation. | |
189 | */ | |
190 | #define AMDGPU_GEM_USERPTR_READONLY (1 << 0) | |
191 | #define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1) | |
192 | #define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2) | |
193 | #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) | |
194 | ||
195 | struct drm_amdgpu_gem_userptr { | |
196 | uint64_t addr; | |
197 | uint64_t size; | |
198 | uint32_t flags; | |
199 | uint32_t handle; | |
200 | }; | |
201 | ||
202 | #define AMDGPU_TILING_MACRO 0x1 | |
203 | #define AMDGPU_TILING_MICRO 0x2 | |
204 | #define AMDGPU_TILING_SWAP_16BIT 0x4 | |
205 | #define AMDGPU_TILING_R600_NO_SCANOUT AMDGPU_TILING_SWAP_16BIT | |
206 | #define AMDGPU_TILING_SWAP_32BIT 0x8 | |
207 | /* this object requires a surface when mapped - i.e. front buffer */ | |
208 | #define AMDGPU_TILING_SURFACE 0x10 | |
209 | #define AMDGPU_TILING_MICRO_SQUARE 0x20 | |
210 | #define AMDGPU_TILING_EG_BANKW_SHIFT 8 | |
211 | #define AMDGPU_TILING_EG_BANKW_MASK 0xf | |
212 | #define AMDGPU_TILING_EG_BANKH_SHIFT 12 | |
213 | #define AMDGPU_TILING_EG_BANKH_MASK 0xf | |
214 | #define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16 | |
215 | #define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf | |
216 | #define AMDGPU_TILING_EG_TILE_SPLIT_SHIFT 24 | |
217 | #define AMDGPU_TILING_EG_TILE_SPLIT_MASK 0xf | |
218 | #define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28 | |
219 | #define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf | |
220 | ||
221 | #define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 | |
222 | #define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 | |
223 | ||
224 | /** The same structure is shared for input/output */ | |
225 | struct drm_amdgpu_gem_metadata { | |
226 | uint32_t handle; /* GEM Object handle */ | |
227 | uint32_t op; /** Do we want get or set metadata */ | |
228 | struct { | |
229 | uint64_t flags; | |
230 | uint64_t tiling_info; /* family specific tiling info */ | |
231 | uint32_t data_size_bytes; | |
232 | uint32_t data[64]; | |
233 | } data; | |
234 | }; | |
235 | ||
236 | struct drm_amdgpu_gem_mmap_in { | |
237 | uint32_t handle; /** the GEM object handle */ | |
238 | uint32_t _pad; | |
239 | }; | |
240 | ||
241 | struct drm_amdgpu_gem_mmap_out { | |
242 | uint64_t addr_ptr; /** mmap offset from the vma offset manager */ | |
243 | }; | |
244 | ||
245 | union drm_amdgpu_gem_mmap { | |
246 | struct drm_amdgpu_gem_mmap_in in; | |
247 | struct drm_amdgpu_gem_mmap_out out; | |
248 | }; | |
249 | ||
250 | struct drm_amdgpu_gem_wait_idle_in { | |
251 | uint32_t handle; /* GEM object handle */ | |
252 | uint32_t flags; | |
253 | uint64_t timeout; /* Timeout to wait. If 0 then returned immediately with the status */ | |
254 | }; | |
255 | ||
256 | struct drm_amdgpu_gem_wait_idle_out { | |
257 | uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */ | |
258 | uint32_t domain; /* Returned current memory domain */ | |
259 | }; | |
260 | ||
261 | union drm_amdgpu_gem_wait_idle { | |
262 | struct drm_amdgpu_gem_wait_idle_in in; | |
263 | struct drm_amdgpu_gem_wait_idle_out out; | |
264 | }; | |
265 | ||
266 | struct drm_amdgpu_wait_cs_in { | |
267 | uint64_t handle; | |
268 | uint64_t timeout; | |
269 | uint32_t ip_type; | |
270 | uint32_t ip_instance; | |
271 | uint32_t ring; | |
66b3cf2a | 272 | uint32_t ctx_id; |
81629cba AD |
273 | }; |
274 | ||
275 | struct drm_amdgpu_wait_cs_out { | |
276 | uint64_t status; | |
277 | }; | |
278 | ||
279 | union drm_amdgpu_wait_cs { | |
280 | struct drm_amdgpu_wait_cs_in in; | |
281 | struct drm_amdgpu_wait_cs_out out; | |
282 | }; | |
283 | ||
284 | /* Sets or returns a value associated with a buffer. */ | |
285 | struct drm_amdgpu_gem_op { | |
286 | uint32_t handle; /* buffer */ | |
287 | uint32_t op; /* AMDGPU_GEM_OP_* */ | |
288 | uint64_t value; /* input or return value */ | |
289 | }; | |
290 | ||
291 | #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 | |
292 | #define AMDGPU_GEM_OP_SET_INITIAL_DOMAIN 1 | |
293 | ||
294 | #define AMDGPU_VA_OP_MAP 1 | |
295 | #define AMDGPU_VA_OP_UNMAP 2 | |
296 | ||
297 | #define AMDGPU_VA_RESULT_OK 0 | |
298 | #define AMDGPU_VA_RESULT_ERROR 1 | |
299 | #define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2 | |
300 | ||
301 | /* Mapping flags */ | |
302 | /* readable mapping */ | |
303 | #define AMDGPU_VM_PAGE_READABLE (1 << 1) | |
304 | /* writable mapping */ | |
305 | #define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) | |
306 | /* executable mapping, new for VI */ | |
307 | #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) | |
308 | ||
309 | struct drm_amdgpu_gem_va_in { | |
310 | /* GEM object handle */ | |
311 | uint32_t handle; | |
312 | uint32_t _pad; | |
313 | /* map or unmap*/ | |
314 | uint32_t operation; | |
315 | /* specify mapping flags */ | |
316 | uint32_t flags; | |
317 | /* va address to assign . Must be correctly aligned.*/ | |
318 | uint64_t va_address; | |
319 | /* Specify offset inside of BO to assign. Must be correctly aligned.*/ | |
320 | uint64_t offset_in_bo; | |
321 | /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/ | |
322 | /* Must be correctly aligned. */ | |
323 | uint64_t map_size; | |
324 | }; | |
325 | ||
326 | struct drm_amdgpu_gem_va_out { | |
327 | uint32_t result; | |
328 | uint32_t _pad; | |
329 | }; | |
330 | ||
331 | union drm_amdgpu_gem_va { | |
332 | struct drm_amdgpu_gem_va_in in; | |
333 | struct drm_amdgpu_gem_va_out out; | |
334 | }; | |
335 | ||
336 | #define AMDGPU_HW_IP_GFX 0 | |
337 | #define AMDGPU_HW_IP_COMPUTE 1 | |
338 | #define AMDGPU_HW_IP_DMA 2 | |
339 | #define AMDGPU_HW_IP_UVD 3 | |
340 | #define AMDGPU_HW_IP_VCE 4 | |
341 | #define AMDGPU_HW_IP_NUM 5 | |
342 | ||
343 | #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 | |
344 | ||
345 | #define AMDGPU_CHUNK_ID_IB 0x01 | |
346 | #define AMDGPU_CHUNK_ID_FENCE 0x02 | |
347 | struct drm_amdgpu_cs_chunk { | |
348 | uint32_t chunk_id; | |
349 | uint32_t length_dw; | |
350 | uint64_t chunk_data; | |
351 | }; | |
352 | ||
353 | struct drm_amdgpu_cs_in { | |
354 | /** Rendering context id */ | |
355 | uint32_t ctx_id; | |
356 | /** Handle of resource list associated with CS */ | |
357 | uint32_t bo_list_handle; | |
358 | uint32_t num_chunks; | |
359 | uint32_t _pad; | |
360 | /* this points to uint64_t * which point to cs chunks */ | |
361 | uint64_t chunks; | |
362 | }; | |
363 | ||
364 | struct drm_amdgpu_cs_out { | |
365 | uint64_t handle; | |
366 | }; | |
367 | ||
368 | union drm_amdgpu_cs { | |
369 | struct drm_amdgpu_cs_in in; | |
370 | struct drm_amdgpu_cs_out out; | |
371 | }; | |
372 | ||
373 | /* Specify flags to be used for IB */ | |
374 | ||
375 | /* This IB should be submitted to CE */ | |
376 | #define AMDGPU_IB_FLAG_CE (1<<0) | |
377 | ||
378 | /* GDS is used by this IB */ | |
379 | #define AMDGPU_IB_FLAG_GDS (1<<1) | |
380 | ||
aa2bdb24 JZ |
381 | /* CE Preamble */ |
382 | #define AMDGPU_IB_FLAG_PREAMBLE (1<<2) | |
383 | ||
81629cba AD |
384 | struct drm_amdgpu_cs_chunk_ib { |
385 | /** | |
386 | * Handle of GEM object to be used as IB or 0 if it is already in | |
387 | * residency list. | |
388 | */ | |
389 | uint32_t handle; | |
390 | uint32_t flags; /* IB Flags */ | |
391 | uint64_t va_start; /* Virtual address to begin IB execution */ | |
392 | uint32_t ib_bytes; /* Size of submission */ | |
393 | uint32_t ip_type; /* HW IP to submit to */ | |
394 | uint32_t ip_instance; /* HW IP index of the same type to submit to */ | |
395 | uint32_t ring; /* Ring index to submit to */ | |
396 | }; | |
397 | ||
398 | struct drm_amdgpu_cs_chunk_fence { | |
399 | uint32_t handle; | |
400 | uint32_t offset; | |
401 | }; | |
402 | ||
403 | struct drm_amdgpu_cs_chunk_data { | |
404 | union { | |
405 | struct drm_amdgpu_cs_chunk_ib ib_data; | |
406 | struct drm_amdgpu_cs_chunk_fence fence_data; | |
407 | }; | |
408 | }; | |
409 | ||
410 | /** | |
411 | * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU | |
412 | * | |
413 | */ | |
414 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 | |
415 | ||
416 | /* indicate if acceleration can be working */ | |
417 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 | |
418 | /* get the crtc_id from the mode object id? */ | |
419 | #define AMDGPU_INFO_CRTC_FROM_ID 0x01 | |
420 | /* query hw IP info */ | |
421 | #define AMDGPU_INFO_HW_IP_INFO 0x02 | |
422 | /* query hw IP instance count for the specified type */ | |
423 | #define AMDGPU_INFO_HW_IP_COUNT 0x03 | |
424 | /* timestamp for GL_ARB_timer_query */ | |
425 | #define AMDGPU_INFO_TIMESTAMP 0x05 | |
426 | /* Query the firmware version */ | |
427 | #define AMDGPU_INFO_FW_VERSION 0x0e | |
428 | /* Subquery id: Query VCE firmware version */ | |
429 | #define AMDGPU_INFO_FW_VCE 0x1 | |
430 | /* Subquery id: Query UVD firmware version */ | |
431 | #define AMDGPU_INFO_FW_UVD 0x2 | |
432 | /* Subquery id: Query GMC firmware version */ | |
433 | #define AMDGPU_INFO_FW_GMC 0x03 | |
434 | /* Subquery id: Query GFX ME firmware version */ | |
435 | #define AMDGPU_INFO_FW_GFX_ME 0x04 | |
436 | /* Subquery id: Query GFX PFP firmware version */ | |
437 | #define AMDGPU_INFO_FW_GFX_PFP 0x05 | |
438 | /* Subquery id: Query GFX CE firmware version */ | |
439 | #define AMDGPU_INFO_FW_GFX_CE 0x06 | |
440 | /* Subquery id: Query GFX RLC firmware version */ | |
441 | #define AMDGPU_INFO_FW_GFX_RLC 0x07 | |
442 | /* Subquery id: Query GFX MEC firmware version */ | |
443 | #define AMDGPU_INFO_FW_GFX_MEC 0x08 | |
444 | /* Subquery id: Query SMC firmware version */ | |
445 | #define AMDGPU_INFO_FW_SMC 0x0a | |
446 | /* Subquery id: Query SDMA firmware version */ | |
447 | #define AMDGPU_INFO_FW_SDMA 0x0b | |
448 | /* number of bytes moved for TTM migration */ | |
449 | #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f | |
450 | /* the used VRAM size */ | |
451 | #define AMDGPU_INFO_VRAM_USAGE 0x10 | |
452 | /* the used GTT size */ | |
453 | #define AMDGPU_INFO_GTT_USAGE 0x11 | |
454 | /* Information about GDS, etc. resource configuration */ | |
455 | #define AMDGPU_INFO_GDS_CONFIG 0x13 | |
456 | /* Query information about VRAM and GTT domains */ | |
457 | #define AMDGPU_INFO_VRAM_GTT 0x14 | |
458 | /* Query information about register in MMR address space*/ | |
459 | #define AMDGPU_INFO_READ_MMR_REG 0x15 | |
460 | /* Query information about device: rev id, family, etc. */ | |
461 | #define AMDGPU_INFO_DEV_INFO 0x16 | |
462 | /* visible vram usage */ | |
463 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 | |
464 | ||
465 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 | |
466 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff | |
467 | #define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8 | |
468 | #define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff | |
469 | ||
470 | /* Input structure for the INFO ioctl */ | |
471 | struct drm_amdgpu_info { | |
472 | /* Where the return value will be stored */ | |
473 | uint64_t return_pointer; | |
474 | /* The size of the return value. Just like "size" in "snprintf", | |
475 | * it limits how many bytes the kernel can write. */ | |
476 | uint32_t return_size; | |
477 | /* The query request id. */ | |
478 | uint32_t query; | |
479 | ||
480 | union { | |
481 | struct { | |
482 | uint32_t id; | |
483 | uint32_t _pad; | |
484 | } mode_crtc; | |
485 | ||
486 | struct { | |
487 | /** AMDGPU_HW_IP_* */ | |
488 | uint32_t type; | |
489 | /** | |
490 | * Index of the IP if there are more IPs of the same type. | |
491 | * Ignored by AMDGPU_INFO_HW_IP_COUNT. | |
492 | */ | |
493 | uint32_t ip_instance; | |
494 | } query_hw_ip; | |
495 | ||
496 | struct { | |
497 | uint32_t dword_offset; | |
498 | uint32_t count; /* number of registers to read */ | |
499 | uint32_t instance; | |
500 | uint32_t flags; | |
501 | } read_mmr_reg; | |
502 | ||
503 | struct { | |
504 | /** AMDGPU_INFO_FW_* */ | |
505 | uint32_t fw_type; | |
506 | /** Index of the IP if there are more IPs of the same type. */ | |
507 | uint32_t ip_instance; | |
508 | /** | |
509 | * Index of the engine. Whether this is used depends | |
510 | * on the firmware type. (e.g. MEC, SDMA) | |
511 | */ | |
512 | uint32_t index; | |
513 | uint32_t _pad; | |
514 | } query_fw; | |
515 | }; | |
516 | }; | |
517 | ||
518 | struct drm_amdgpu_info_gds { | |
519 | /** GDS GFX partition size */ | |
520 | uint32_t gds_gfx_partition_size; | |
521 | /** GDS compute partition size */ | |
522 | uint32_t compute_partition_size; | |
523 | /** total GDS memory size */ | |
524 | uint32_t gds_total_size; | |
525 | /** GWS size per GFX partition */ | |
526 | uint32_t gws_per_gfx_partition; | |
527 | /** GSW size per compute partition */ | |
528 | uint32_t gws_per_compute_partition; | |
529 | /** OA size per GFX partition */ | |
530 | uint32_t oa_per_gfx_partition; | |
531 | /** OA size per compute partition */ | |
532 | uint32_t oa_per_compute_partition; | |
533 | uint32_t _pad; | |
534 | }; | |
535 | ||
536 | struct drm_amdgpu_info_vram_gtt { | |
537 | uint64_t vram_size; | |
538 | uint64_t vram_cpu_accessible_size; | |
539 | uint64_t gtt_size; | |
540 | }; | |
541 | ||
542 | struct drm_amdgpu_info_firmware { | |
543 | uint32_t ver; | |
544 | uint32_t feature; | |
545 | }; | |
546 | ||
547 | struct drm_amdgpu_info_device { | |
548 | /** PCI Device ID */ | |
549 | uint32_t device_id; | |
550 | /** Internal chip revision: A0, A1, etc.) */ | |
551 | uint32_t chip_rev; | |
552 | uint32_t external_rev; | |
553 | /** Revision id in PCI Config space */ | |
554 | uint32_t pci_rev; | |
555 | uint32_t family; | |
556 | uint32_t num_shader_engines; | |
557 | uint32_t num_shader_arrays_per_engine; | |
558 | uint32_t gpu_counter_freq; /* in KHz */ | |
559 | uint64_t max_engine_clock; /* in KHz */ | |
560 | /* cu information */ | |
561 | uint32_t cu_active_number; | |
562 | uint32_t cu_ao_mask; | |
563 | uint32_t cu_bitmap[4][4]; | |
564 | /** Render backend pipe mask. One render backend is CB+DB. */ | |
565 | uint32_t enabled_rb_pipes_mask; | |
566 | uint32_t num_rb_pipes; | |
567 | uint32_t num_hw_gfx_contexts; | |
568 | uint32_t _pad; | |
569 | uint64_t ids_flags; | |
570 | /** Starting virtual address for UMDs. */ | |
571 | uint64_t virtual_address_offset; | |
02b70c8c JZ |
572 | /** The maximum virtual address */ |
573 | uint64_t virtual_address_max; | |
81629cba AD |
574 | /** Required alignment of virtual addresses. */ |
575 | uint32_t virtual_address_alignment; | |
576 | /** Page table entry - fragment size */ | |
577 | uint32_t pte_fragment_size; | |
578 | uint32_t gart_page_size; | |
579 | }; | |
580 | ||
581 | struct drm_amdgpu_info_hw_ip { | |
582 | /** Version of h/w IP */ | |
583 | uint32_t hw_ip_version_major; | |
584 | uint32_t hw_ip_version_minor; | |
585 | /** Capabilities */ | |
586 | uint64_t capabilities_flags; | |
587 | /** Bitmask of available rings. Bit 0 means ring 0, etc. */ | |
588 | uint32_t available_rings; | |
589 | uint32_t _pad; | |
590 | }; | |
591 | ||
592 | /* | |
593 | * Supported GPU families | |
594 | */ | |
595 | #define AMDGPU_FAMILY_UNKNOWN 0 | |
596 | #define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ | |
597 | #define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ | |
598 | #define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ | |
599 | #define AMDGPU_FAMILY_CZ 135 /* Carrizo */ | |
600 | ||
601 | #endif |