/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

32 | #ifndef __AMDGPU_DRM_H__ | |
33 | #define __AMDGPU_DRM_H__ | |
34 | ||
35 | #include <drm/drm.h> | |
36 | ||
37 | #define DRM_AMDGPU_GEM_CREATE 0x00 | |
38 | #define DRM_AMDGPU_GEM_MMAP 0x01 | |
39 | #define DRM_AMDGPU_CTX 0x02 | |
40 | #define DRM_AMDGPU_BO_LIST 0x03 | |
41 | #define DRM_AMDGPU_CS 0x04 | |
42 | #define DRM_AMDGPU_INFO 0x05 | |
43 | #define DRM_AMDGPU_GEM_METADATA 0x06 | |
44 | #define DRM_AMDGPU_GEM_WAIT_IDLE 0x07 | |
45 | #define DRM_AMDGPU_GEM_VA 0x08 | |
46 | #define DRM_AMDGPU_WAIT_CS 0x09 | |
47 | #define DRM_AMDGPU_GEM_OP 0x10 | |
48 | #define DRM_AMDGPU_GEM_USERPTR 0x11 | |
49 | ||
50 | #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) | |
51 | #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) | |
52 | #define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx) | |
53 | #define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list) | |
54 | #define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs) | |
55 | #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info) | |
56 | #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata) | |
57 | #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle) | |
58 | #define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va) | |
59 | #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) | |
60 | #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) | |
61 | #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) | |
62 | ||
63 | #define AMDGPU_GEM_DOMAIN_CPU 0x1 | |
64 | #define AMDGPU_GEM_DOMAIN_GTT 0x2 | |
65 | #define AMDGPU_GEM_DOMAIN_VRAM 0x4 | |
66 | #define AMDGPU_GEM_DOMAIN_GDS 0x8 | |
67 | #define AMDGPU_GEM_DOMAIN_GWS 0x10 | |
68 | #define AMDGPU_GEM_DOMAIN_OA 0x20 | |
69 | ||
81629cba AD |
70 | /* Flag that CPU access will be required for the case of VRAM domain */ |
71 | #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) | |
72 | /* Flag that CPU access will not work, this VRAM domain is invisible */ | |
73 | #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) | |
81629cba | 74 | /* Flag that USWC attributes should be used for GTT */ |
88671288 | 75 | #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) |
81629cba | 76 | |
81629cba AD |
77 | struct drm_amdgpu_gem_create_in { |
78 | /** the requested memory size */ | |
79 | uint64_t bo_size; | |
80 | /** physical start_addr alignment in bytes for some HW requirements */ | |
81 | uint64_t alignment; | |
82 | /** the requested memory domains */ | |
83 | uint64_t domains; | |
84 | /** allocation flags */ | |
85 | uint64_t domain_flags; | |
86 | }; | |
87 | ||
88 | struct drm_amdgpu_gem_create_out { | |
89 | /** returned GEM object handle */ | |
90 | uint32_t handle; | |
91 | uint32_t _pad; | |
92 | }; | |
93 | ||
94 | union drm_amdgpu_gem_create { | |
95 | struct drm_amdgpu_gem_create_in in; | |
96 | struct drm_amdgpu_gem_create_out out; | |
97 | }; | |
98 | ||
99 | /** Opcode to create new residency list. */ | |
100 | #define AMDGPU_BO_LIST_OP_CREATE 0 | |
101 | /** Opcode to destroy previously created residency list */ | |
102 | #define AMDGPU_BO_LIST_OP_DESTROY 1 | |
103 | /** Opcode to update resource information in the list */ | |
104 | #define AMDGPU_BO_LIST_OP_UPDATE 2 | |
105 | ||
106 | struct drm_amdgpu_bo_list_in { | |
107 | /** Type of operation */ | |
108 | uint32_t operation; | |
109 | /** Handle of list or 0 if we want to create one */ | |
110 | uint32_t list_handle; | |
111 | /** Number of BOs in list */ | |
112 | uint32_t bo_number; | |
113 | /** Size of each element describing BO */ | |
114 | uint32_t bo_info_size; | |
115 | /** Pointer to array describing BOs */ | |
116 | uint64_t bo_info_ptr; | |
117 | }; | |
118 | ||
119 | struct drm_amdgpu_bo_list_entry { | |
120 | /** Handle of BO */ | |
121 | uint32_t bo_handle; | |
122 | /** New (if specified) BO priority to be used during migration */ | |
123 | uint32_t bo_priority; | |
124 | }; | |
125 | ||
126 | struct drm_amdgpu_bo_list_out { | |
127 | /** Handle of resource list */ | |
128 | uint32_t list_handle; | |
129 | uint32_t _pad; | |
130 | }; | |
131 | ||
132 | union drm_amdgpu_bo_list { | |
133 | struct drm_amdgpu_bo_list_in in; | |
134 | struct drm_amdgpu_bo_list_out out; | |
135 | }; | |
136 | ||
137 | /* context related */ | |
138 | #define AMDGPU_CTX_OP_ALLOC_CTX 1 | |
139 | #define AMDGPU_CTX_OP_FREE_CTX 2 | |
140 | #define AMDGPU_CTX_OP_QUERY_STATE 3 | |
141 | ||
142 | #define AMDGPU_CTX_OP_STATE_RUNNING 1 | |
143 | ||
d94aed5a MO |
144 | /* GPU reset status */ |
145 | #define AMDGPU_CTX_NO_RESET 0 | |
146 | #define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ | |
147 | #define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */ | |
148 | #define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */ | |
149 | ||
81629cba AD |
150 | struct drm_amdgpu_ctx_in { |
151 | uint32_t op; | |
152 | uint32_t flags; | |
153 | uint32_t ctx_id; | |
154 | uint32_t _pad; | |
155 | }; | |
156 | ||
157 | union drm_amdgpu_ctx_out { | |
158 | struct { | |
159 | uint32_t ctx_id; | |
160 | uint32_t _pad; | |
161 | } alloc; | |
162 | ||
163 | struct { | |
164 | uint64_t flags; | |
d94aed5a MO |
165 | /** Number of resets caused by this context so far. */ |
166 | uint32_t hangs; | |
167 | /** Reset status since the last call of the ioctl. */ | |
168 | uint32_t reset_status; | |
81629cba AD |
169 | } state; |
170 | }; | |
171 | ||
172 | union drm_amdgpu_ctx { | |
173 | struct drm_amdgpu_ctx_in in; | |
174 | union drm_amdgpu_ctx_out out; | |
175 | }; | |
176 | ||
177 | /* | |
178 | * This is not a reliable API and you should expect it to fail for any | |
179 | * number of reasons and have fallback path that do not use userptr to | |
180 | * perform any operation. | |
181 | */ | |
182 | #define AMDGPU_GEM_USERPTR_READONLY (1 << 0) | |
183 | #define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1) | |
184 | #define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2) | |
185 | #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) | |
186 | ||
187 | struct drm_amdgpu_gem_userptr { | |
188 | uint64_t addr; | |
189 | uint64_t size; | |
190 | uint32_t flags; | |
191 | uint32_t handle; | |
192 | }; | |
193 | ||
fbd76d59 MO |
194 | /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ |
195 | #define AMDGPU_TILING_ARRAY_MODE_SHIFT 0 | |
196 | #define AMDGPU_TILING_ARRAY_MODE_MASK 0xf | |
197 | #define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4 | |
198 | #define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f | |
199 | #define AMDGPU_TILING_TILE_SPLIT_SHIFT 9 | |
200 | #define AMDGPU_TILING_TILE_SPLIT_MASK 0x7 | |
201 | #define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12 | |
202 | #define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7 | |
203 | #define AMDGPU_TILING_BANK_WIDTH_SHIFT 15 | |
204 | #define AMDGPU_TILING_BANK_WIDTH_MASK 0x3 | |
205 | #define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17 | |
206 | #define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3 | |
207 | #define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19 | |
208 | #define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3 | |
209 | #define AMDGPU_TILING_NUM_BANKS_SHIFT 21 | |
210 | #define AMDGPU_TILING_NUM_BANKS_MASK 0x3 | |
211 | ||
212 | #define AMDGPU_TILING_SET(field, value) \ | |
213 | (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT) | |
214 | #define AMDGPU_TILING_GET(value, field) \ | |
215 | (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK) | |
81629cba AD |
216 | |
217 | #define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 | |
218 | #define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 | |
219 | ||
220 | /** The same structure is shared for input/output */ | |
221 | struct drm_amdgpu_gem_metadata { | |
222 | uint32_t handle; /* GEM Object handle */ | |
223 | uint32_t op; /** Do we want get or set metadata */ | |
224 | struct { | |
225 | uint64_t flags; | |
226 | uint64_t tiling_info; /* family specific tiling info */ | |
227 | uint32_t data_size_bytes; | |
228 | uint32_t data[64]; | |
229 | } data; | |
230 | }; | |
231 | ||
232 | struct drm_amdgpu_gem_mmap_in { | |
233 | uint32_t handle; /** the GEM object handle */ | |
234 | uint32_t _pad; | |
235 | }; | |
236 | ||
237 | struct drm_amdgpu_gem_mmap_out { | |
238 | uint64_t addr_ptr; /** mmap offset from the vma offset manager */ | |
239 | }; | |
240 | ||
241 | union drm_amdgpu_gem_mmap { | |
242 | struct drm_amdgpu_gem_mmap_in in; | |
243 | struct drm_amdgpu_gem_mmap_out out; | |
244 | }; | |
245 | ||
246 | struct drm_amdgpu_gem_wait_idle_in { | |
247 | uint32_t handle; /* GEM object handle */ | |
248 | uint32_t flags; | |
249 | uint64_t timeout; /* Timeout to wait. If 0 then returned immediately with the status */ | |
250 | }; | |
251 | ||
252 | struct drm_amdgpu_gem_wait_idle_out { | |
253 | uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */ | |
254 | uint32_t domain; /* Returned current memory domain */ | |
255 | }; | |
256 | ||
257 | union drm_amdgpu_gem_wait_idle { | |
258 | struct drm_amdgpu_gem_wait_idle_in in; | |
259 | struct drm_amdgpu_gem_wait_idle_out out; | |
260 | }; | |
261 | ||
262 | struct drm_amdgpu_wait_cs_in { | |
263 | uint64_t handle; | |
264 | uint64_t timeout; | |
265 | uint32_t ip_type; | |
266 | uint32_t ip_instance; | |
267 | uint32_t ring; | |
66b3cf2a | 268 | uint32_t ctx_id; |
81629cba AD |
269 | }; |
270 | ||
271 | struct drm_amdgpu_wait_cs_out { | |
272 | uint64_t status; | |
273 | }; | |
274 | ||
275 | union drm_amdgpu_wait_cs { | |
276 | struct drm_amdgpu_wait_cs_in in; | |
277 | struct drm_amdgpu_wait_cs_out out; | |
278 | }; | |
279 | ||
280 | /* Sets or returns a value associated with a buffer. */ | |
281 | struct drm_amdgpu_gem_op { | |
282 | uint32_t handle; /* buffer */ | |
283 | uint32_t op; /* AMDGPU_GEM_OP_* */ | |
284 | uint64_t value; /* input or return value */ | |
285 | }; | |
286 | ||
d8f65a23 MO |
287 | #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 |
288 | #define AMDGPU_GEM_OP_SET_PLACEMENT 1 | |
81629cba AD |
289 | |
290 | #define AMDGPU_VA_OP_MAP 1 | |
291 | #define AMDGPU_VA_OP_UNMAP 2 | |
292 | ||
293 | #define AMDGPU_VA_RESULT_OK 0 | |
294 | #define AMDGPU_VA_RESULT_ERROR 1 | |
295 | #define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2 | |
296 | ||
297 | /* Mapping flags */ | |
298 | /* readable mapping */ | |
299 | #define AMDGPU_VM_PAGE_READABLE (1 << 1) | |
300 | /* writable mapping */ | |
301 | #define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) | |
302 | /* executable mapping, new for VI */ | |
303 | #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) | |
304 | ||
305 | struct drm_amdgpu_gem_va_in { | |
306 | /* GEM object handle */ | |
307 | uint32_t handle; | |
308 | uint32_t _pad; | |
309 | /* map or unmap*/ | |
310 | uint32_t operation; | |
311 | /* specify mapping flags */ | |
312 | uint32_t flags; | |
313 | /* va address to assign . Must be correctly aligned.*/ | |
314 | uint64_t va_address; | |
315 | /* Specify offset inside of BO to assign. Must be correctly aligned.*/ | |
316 | uint64_t offset_in_bo; | |
317 | /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/ | |
318 | /* Must be correctly aligned. */ | |
319 | uint64_t map_size; | |
320 | }; | |
321 | ||
322 | struct drm_amdgpu_gem_va_out { | |
323 | uint32_t result; | |
324 | uint32_t _pad; | |
325 | }; | |
326 | ||
327 | union drm_amdgpu_gem_va { | |
328 | struct drm_amdgpu_gem_va_in in; | |
329 | struct drm_amdgpu_gem_va_out out; | |
330 | }; | |
331 | ||
332 | #define AMDGPU_HW_IP_GFX 0 | |
333 | #define AMDGPU_HW_IP_COMPUTE 1 | |
334 | #define AMDGPU_HW_IP_DMA 2 | |
335 | #define AMDGPU_HW_IP_UVD 3 | |
336 | #define AMDGPU_HW_IP_VCE 4 | |
337 | #define AMDGPU_HW_IP_NUM 5 | |
338 | ||
339 | #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 | |
340 | ||
341 | #define AMDGPU_CHUNK_ID_IB 0x01 | |
342 | #define AMDGPU_CHUNK_ID_FENCE 0x02 | |
343 | struct drm_amdgpu_cs_chunk { | |
344 | uint32_t chunk_id; | |
345 | uint32_t length_dw; | |
346 | uint64_t chunk_data; | |
347 | }; | |
348 | ||
349 | struct drm_amdgpu_cs_in { | |
350 | /** Rendering context id */ | |
351 | uint32_t ctx_id; | |
352 | /** Handle of resource list associated with CS */ | |
353 | uint32_t bo_list_handle; | |
354 | uint32_t num_chunks; | |
355 | uint32_t _pad; | |
356 | /* this points to uint64_t * which point to cs chunks */ | |
357 | uint64_t chunks; | |
358 | }; | |
359 | ||
360 | struct drm_amdgpu_cs_out { | |
361 | uint64_t handle; | |
362 | }; | |
363 | ||
364 | union drm_amdgpu_cs { | |
365 | struct drm_amdgpu_cs_in in; | |
366 | struct drm_amdgpu_cs_out out; | |
367 | }; | |
368 | ||
369 | /* Specify flags to be used for IB */ | |
370 | ||
371 | /* This IB should be submitted to CE */ | |
372 | #define AMDGPU_IB_FLAG_CE (1<<0) | |
373 | ||
374 | /* GDS is used by this IB */ | |
375 | #define AMDGPU_IB_FLAG_GDS (1<<1) | |
376 | ||
aa2bdb24 JZ |
377 | /* CE Preamble */ |
378 | #define AMDGPU_IB_FLAG_PREAMBLE (1<<2) | |
379 | ||
81629cba AD |
380 | struct drm_amdgpu_cs_chunk_ib { |
381 | /** | |
382 | * Handle of GEM object to be used as IB or 0 if it is already in | |
383 | * residency list. | |
384 | */ | |
385 | uint32_t handle; | |
386 | uint32_t flags; /* IB Flags */ | |
387 | uint64_t va_start; /* Virtual address to begin IB execution */ | |
388 | uint32_t ib_bytes; /* Size of submission */ | |
389 | uint32_t ip_type; /* HW IP to submit to */ | |
390 | uint32_t ip_instance; /* HW IP index of the same type to submit to */ | |
391 | uint32_t ring; /* Ring index to submit to */ | |
392 | }; | |
393 | ||
394 | struct drm_amdgpu_cs_chunk_fence { | |
395 | uint32_t handle; | |
396 | uint32_t offset; | |
397 | }; | |
398 | ||
399 | struct drm_amdgpu_cs_chunk_data { | |
400 | union { | |
401 | struct drm_amdgpu_cs_chunk_ib ib_data; | |
402 | struct drm_amdgpu_cs_chunk_fence fence_data; | |
403 | }; | |
404 | }; | |
405 | ||
406 | /** | |
407 | * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU | |
408 | * | |
409 | */ | |
410 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 | |
411 | ||
412 | /* indicate if acceleration can be working */ | |
413 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 | |
414 | /* get the crtc_id from the mode object id? */ | |
415 | #define AMDGPU_INFO_CRTC_FROM_ID 0x01 | |
416 | /* query hw IP info */ | |
417 | #define AMDGPU_INFO_HW_IP_INFO 0x02 | |
418 | /* query hw IP instance count for the specified type */ | |
419 | #define AMDGPU_INFO_HW_IP_COUNT 0x03 | |
420 | /* timestamp for GL_ARB_timer_query */ | |
421 | #define AMDGPU_INFO_TIMESTAMP 0x05 | |
422 | /* Query the firmware version */ | |
423 | #define AMDGPU_INFO_FW_VERSION 0x0e | |
424 | /* Subquery id: Query VCE firmware version */ | |
425 | #define AMDGPU_INFO_FW_VCE 0x1 | |
426 | /* Subquery id: Query UVD firmware version */ | |
427 | #define AMDGPU_INFO_FW_UVD 0x2 | |
428 | /* Subquery id: Query GMC firmware version */ | |
429 | #define AMDGPU_INFO_FW_GMC 0x03 | |
430 | /* Subquery id: Query GFX ME firmware version */ | |
431 | #define AMDGPU_INFO_FW_GFX_ME 0x04 | |
432 | /* Subquery id: Query GFX PFP firmware version */ | |
433 | #define AMDGPU_INFO_FW_GFX_PFP 0x05 | |
434 | /* Subquery id: Query GFX CE firmware version */ | |
435 | #define AMDGPU_INFO_FW_GFX_CE 0x06 | |
436 | /* Subquery id: Query GFX RLC firmware version */ | |
437 | #define AMDGPU_INFO_FW_GFX_RLC 0x07 | |
438 | /* Subquery id: Query GFX MEC firmware version */ | |
439 | #define AMDGPU_INFO_FW_GFX_MEC 0x08 | |
440 | /* Subquery id: Query SMC firmware version */ | |
441 | #define AMDGPU_INFO_FW_SMC 0x0a | |
442 | /* Subquery id: Query SDMA firmware version */ | |
443 | #define AMDGPU_INFO_FW_SDMA 0x0b | |
444 | /* number of bytes moved for TTM migration */ | |
445 | #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f | |
446 | /* the used VRAM size */ | |
447 | #define AMDGPU_INFO_VRAM_USAGE 0x10 | |
448 | /* the used GTT size */ | |
449 | #define AMDGPU_INFO_GTT_USAGE 0x11 | |
450 | /* Information about GDS, etc. resource configuration */ | |
451 | #define AMDGPU_INFO_GDS_CONFIG 0x13 | |
452 | /* Query information about VRAM and GTT domains */ | |
453 | #define AMDGPU_INFO_VRAM_GTT 0x14 | |
454 | /* Query information about register in MMR address space*/ | |
455 | #define AMDGPU_INFO_READ_MMR_REG 0x15 | |
456 | /* Query information about device: rev id, family, etc. */ | |
457 | #define AMDGPU_INFO_DEV_INFO 0x16 | |
458 | /* visible vram usage */ | |
459 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 | |
460 | ||
461 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 | |
462 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff | |
463 | #define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8 | |
464 | #define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff | |
465 | ||
466 | /* Input structure for the INFO ioctl */ | |
467 | struct drm_amdgpu_info { | |
468 | /* Where the return value will be stored */ | |
469 | uint64_t return_pointer; | |
470 | /* The size of the return value. Just like "size" in "snprintf", | |
471 | * it limits how many bytes the kernel can write. */ | |
472 | uint32_t return_size; | |
473 | /* The query request id. */ | |
474 | uint32_t query; | |
475 | ||
476 | union { | |
477 | struct { | |
478 | uint32_t id; | |
479 | uint32_t _pad; | |
480 | } mode_crtc; | |
481 | ||
482 | struct { | |
483 | /** AMDGPU_HW_IP_* */ | |
484 | uint32_t type; | |
485 | /** | |
486 | * Index of the IP if there are more IPs of the same type. | |
487 | * Ignored by AMDGPU_INFO_HW_IP_COUNT. | |
488 | */ | |
489 | uint32_t ip_instance; | |
490 | } query_hw_ip; | |
491 | ||
492 | struct { | |
493 | uint32_t dword_offset; | |
494 | uint32_t count; /* number of registers to read */ | |
495 | uint32_t instance; | |
496 | uint32_t flags; | |
497 | } read_mmr_reg; | |
498 | ||
499 | struct { | |
500 | /** AMDGPU_INFO_FW_* */ | |
501 | uint32_t fw_type; | |
502 | /** Index of the IP if there are more IPs of the same type. */ | |
503 | uint32_t ip_instance; | |
504 | /** | |
505 | * Index of the engine. Whether this is used depends | |
506 | * on the firmware type. (e.g. MEC, SDMA) | |
507 | */ | |
508 | uint32_t index; | |
509 | uint32_t _pad; | |
510 | } query_fw; | |
511 | }; | |
512 | }; | |
513 | ||
514 | struct drm_amdgpu_info_gds { | |
515 | /** GDS GFX partition size */ | |
516 | uint32_t gds_gfx_partition_size; | |
517 | /** GDS compute partition size */ | |
518 | uint32_t compute_partition_size; | |
519 | /** total GDS memory size */ | |
520 | uint32_t gds_total_size; | |
521 | /** GWS size per GFX partition */ | |
522 | uint32_t gws_per_gfx_partition; | |
523 | /** GSW size per compute partition */ | |
524 | uint32_t gws_per_compute_partition; | |
525 | /** OA size per GFX partition */ | |
526 | uint32_t oa_per_gfx_partition; | |
527 | /** OA size per compute partition */ | |
528 | uint32_t oa_per_compute_partition; | |
529 | uint32_t _pad; | |
530 | }; | |
531 | ||
532 | struct drm_amdgpu_info_vram_gtt { | |
533 | uint64_t vram_size; | |
534 | uint64_t vram_cpu_accessible_size; | |
535 | uint64_t gtt_size; | |
536 | }; | |
537 | ||
538 | struct drm_amdgpu_info_firmware { | |
539 | uint32_t ver; | |
540 | uint32_t feature; | |
541 | }; | |
542 | ||
543 | struct drm_amdgpu_info_device { | |
544 | /** PCI Device ID */ | |
545 | uint32_t device_id; | |
546 | /** Internal chip revision: A0, A1, etc.) */ | |
547 | uint32_t chip_rev; | |
548 | uint32_t external_rev; | |
549 | /** Revision id in PCI Config space */ | |
550 | uint32_t pci_rev; | |
551 | uint32_t family; | |
552 | uint32_t num_shader_engines; | |
553 | uint32_t num_shader_arrays_per_engine; | |
554 | uint32_t gpu_counter_freq; /* in KHz */ | |
555 | uint64_t max_engine_clock; /* in KHz */ | |
32bf7106 | 556 | uint64_t max_memory_clock; /* in KHz */ |
81629cba AD |
557 | /* cu information */ |
558 | uint32_t cu_active_number; | |
559 | uint32_t cu_ao_mask; | |
560 | uint32_t cu_bitmap[4][4]; | |
561 | /** Render backend pipe mask. One render backend is CB+DB. */ | |
562 | uint32_t enabled_rb_pipes_mask; | |
563 | uint32_t num_rb_pipes; | |
564 | uint32_t num_hw_gfx_contexts; | |
565 | uint32_t _pad; | |
566 | uint64_t ids_flags; | |
567 | /** Starting virtual address for UMDs. */ | |
568 | uint64_t virtual_address_offset; | |
02b70c8c JZ |
569 | /** The maximum virtual address */ |
570 | uint64_t virtual_address_max; | |
81629cba AD |
571 | /** Required alignment of virtual addresses. */ |
572 | uint32_t virtual_address_alignment; | |
573 | /** Page table entry - fragment size */ | |
574 | uint32_t pte_fragment_size; | |
575 | uint32_t gart_page_size; | |
576 | }; | |
577 | ||
578 | struct drm_amdgpu_info_hw_ip { | |
579 | /** Version of h/w IP */ | |
580 | uint32_t hw_ip_version_major; | |
581 | uint32_t hw_ip_version_minor; | |
582 | /** Capabilities */ | |
583 | uint64_t capabilities_flags; | |
584 | /** Bitmask of available rings. Bit 0 means ring 0, etc. */ | |
585 | uint32_t available_rings; | |
586 | uint32_t _pad; | |
587 | }; | |
588 | ||
589 | /* | |
590 | * Supported GPU families | |
591 | */ | |
592 | #define AMDGPU_FAMILY_UNKNOWN 0 | |
593 | #define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ | |
594 | #define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ | |
595 | #define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ | |
596 | #define AMDGPU_FAMILY_CZ 135 /* Carrizo */ | |
597 | ||
598 | #endif |