/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define GFX7_NUM_GFX_RINGS     1
#define GFX7_NUM_COMPUTE_RINGS 8

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
MODULE_FIRMWARE("radeon/bonaire_ce.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_mec.bin");

MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
MODULE_FIRMWARE("radeon/hawaii_me.bin");
MODULE_FIRMWARE("radeon/hawaii_ce.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_mec.bin");

MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
MODULE_FIRMWARE("radeon/kaveri_me.bin");
MODULE_FIRMWARE("radeon/kaveri_ce.bin");
MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
MODULE_FIRMWARE("radeon/kaveri_mec.bin");
MODULE_FIRMWARE("radeon/kaveri_mec2.bin");

MODULE_FIRMWARE("radeon/kabini_pfp.bin");
MODULE_FIRMWARE("radeon/kabini_me.bin");
MODULE_FIRMWARE("radeon/kabini_ce.bin");
MODULE_FIRMWARE("radeon/kabini_rlc.bin");
MODULE_FIRMWARE("radeon/kabini_mec.bin");

MODULE_FIRMWARE("radeon/mullins_pfp.bin");
MODULE_FIRMWARE("radeon/mullins_me.bin");
MODULE_FIRMWARE("radeon/mullins_ce.bin");
MODULE_FIRMWARE("radeon/mullins_rlc.bin");
MODULE_FIRMWARE("radeon/mullins_mec.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
        {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
        {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
        {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
        {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
        {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
        {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
        {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
        {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
        {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
        {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
        {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
        {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
        {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
        {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
        {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
        {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
static const u32 spectre_rlc_save_restore_register_list[] =
{
        (0x0e00 << 16) | (0xc12c >> 2),
        (0x0e00 << 16) | (0xc140 >> 2),
        (0x0e00 << 16) | (0xc150 >> 2),
        (0x0e00 << 16) | (0xc15c >> 2),
        (0x0e00 << 16) | (0xc168 >> 2),
        (0x0e00 << 16) | (0xc170 >> 2),
        (0x0e00 << 16) | (0xc178 >> 2),
        (0x0e00 << 16) | (0xc204 >> 2),
        (0x0e00 << 16) | (0xc2b4 >> 2),
        (0x0e00 << 16) | (0xc2b8 >> 2),
        (0x0e00 << 16) | (0xc2bc >> 2),
        (0x0e00 << 16) | (0xc2c0 >> 2),
        (0x0e00 << 16) | (0x8228 >> 2),
        (0x0e00 << 16) | (0x829c >> 2),
        (0x0e00 << 16) | (0x869c >> 2),
        (0x0600 << 16) | (0x98f4 >> 2),
        (0x0e00 << 16) | (0x98f8 >> 2),
        (0x0e00 << 16) | (0x9900 >> 2),
        (0x0e00 << 16) | (0xc260 >> 2),
        (0x0e00 << 16) | (0x90e8 >> 2),
        (0x0e00 << 16) | (0x3c000 >> 2),
        (0x0e00 << 16) | (0x3c00c >> 2),
        (0x0e00 << 16) | (0x8c1c >> 2),
        (0x0e00 << 16) | (0x9700 >> 2),
        (0x0e00 << 16) | (0xcd20 >> 2),
        (0x4e00 << 16) | (0xcd20 >> 2),
        (0x5e00 << 16) | (0xcd20 >> 2),
        (0x6e00 << 16) | (0xcd20 >> 2),
        (0x7e00 << 16) | (0xcd20 >> 2),
        (0x8e00 << 16) | (0xcd20 >> 2),
        (0x9e00 << 16) | (0xcd20 >> 2),
        (0xae00 << 16) | (0xcd20 >> 2),
        (0xbe00 << 16) | (0xcd20 >> 2),
        (0x0e00 << 16) | (0x89bc >> 2),
        (0x0e00 << 16) | (0x8900 >> 2),
        (0x0e00 << 16) | (0xc130 >> 2),
        (0x0e00 << 16) | (0xc134 >> 2),
        (0x0e00 << 16) | (0xc1fc >> 2),
        (0x0e00 << 16) | (0xc208 >> 2),
        (0x0e00 << 16) | (0xc264 >> 2),
        (0x0e00 << 16) | (0xc268 >> 2),
        (0x0e00 << 16) | (0xc26c >> 2),
        (0x0e00 << 16) | (0xc270 >> 2),
        (0x0e00 << 16) | (0xc274 >> 2),
        (0x0e00 << 16) | (0xc278 >> 2),
        (0x0e00 << 16) | (0xc27c >> 2),
        (0x0e00 << 16) | (0xc280 >> 2),
        (0x0e00 << 16) | (0xc284 >> 2),
        (0x0e00 << 16) | (0xc288 >> 2),
        (0x0e00 << 16) | (0xc28c >> 2),
        (0x0e00 << 16) | (0xc290 >> 2),
        (0x0e00 << 16) | (0xc294 >> 2),
        (0x0e00 << 16) | (0xc298 >> 2),
        (0x0e00 << 16) | (0xc29c >> 2),
        (0x0e00 << 16) | (0xc2a0 >> 2),
        (0x0e00 << 16) | (0xc2a4 >> 2),
        (0x0e00 << 16) | (0xc2a8 >> 2),
        (0x0e00 << 16) | (0xc2ac >> 2),
        (0x0e00 << 16) | (0xc2b0 >> 2),
        (0x0e00 << 16) | (0x301d0 >> 2),
        (0x0e00 << 16) | (0x30238 >> 2),
        (0x0e00 << 16) | (0x30250 >> 2),
        (0x0e00 << 16) | (0x30254 >> 2),
        (0x0e00 << 16) | (0x30258 >> 2),
        (0x0e00 << 16) | (0x3025c >> 2),
        (0x4e00 << 16) | (0xc900 >> 2),
        (0x5e00 << 16) | (0xc900 >> 2),
        (0x6e00 << 16) | (0xc900 >> 2),
        (0x7e00 << 16) | (0xc900 >> 2),
        (0x8e00 << 16) | (0xc900 >> 2),
        (0x9e00 << 16) | (0xc900 >> 2),
        (0xae00 << 16) | (0xc900 >> 2),
        (0xbe00 << 16) | (0xc900 >> 2),
        (0x4e00 << 16) | (0xc904 >> 2),
        (0x5e00 << 16) | (0xc904 >> 2),
        (0x6e00 << 16) | (0xc904 >> 2),
        (0x7e00 << 16) | (0xc904 >> 2),
        (0x8e00 << 16) | (0xc904 >> 2),
        (0x9e00 << 16) | (0xc904 >> 2),
        (0xae00 << 16) | (0xc904 >> 2),
        (0xbe00 << 16) | (0xc904 >> 2),
        (0x4e00 << 16) | (0xc908 >> 2),
        (0x5e00 << 16) | (0xc908 >> 2),
        (0x6e00 << 16) | (0xc908 >> 2),
        (0x7e00 << 16) | (0xc908 >> 2),
        (0x8e00 << 16) | (0xc908 >> 2),
        (0x9e00 << 16) | (0xc908 >> 2),
        (0xae00 << 16) | (0xc908 >> 2),
        (0xbe00 << 16) | (0xc908 >> 2),
        (0x4e00 << 16) | (0xc90c >> 2),
        (0x5e00 << 16) | (0xc90c >> 2),
        (0x6e00 << 16) | (0xc90c >> 2),
        (0x7e00 << 16) | (0xc90c >> 2),
        (0x8e00 << 16) | (0xc90c >> 2),
        (0x9e00 << 16) | (0xc90c >> 2),
        (0xae00 << 16) | (0xc90c >> 2),
        (0xbe00 << 16) | (0xc90c >> 2),
        (0x4e00 << 16) | (0xc910 >> 2),
        (0x5e00 << 16) | (0xc910 >> 2),
        (0x6e00 << 16) | (0xc910 >> 2),
        (0x7e00 << 16) | (0xc910 >> 2),
        (0x8e00 << 16) | (0xc910 >> 2),
        (0x9e00 << 16) | (0xc910 >> 2),
        (0xae00 << 16) | (0xc910 >> 2),
        (0xbe00 << 16) | (0xc910 >> 2),
        (0x0e00 << 16) | (0xc99c >> 2),
        (0x0e00 << 16) | (0x9834 >> 2),
        (0x0000 << 16) | (0x30f00 >> 2),
        (0x0001 << 16) | (0x30f00 >> 2),
        (0x0000 << 16) | (0x30f04 >> 2),
        (0x0001 << 16) | (0x30f04 >> 2),
        (0x0000 << 16) | (0x30f08 >> 2),
        (0x0001 << 16) | (0x30f08 >> 2),
        (0x0000 << 16) | (0x30f0c >> 2),
        (0x0001 << 16) | (0x30f0c >> 2),
        (0x0600 << 16) | (0x9b7c >> 2),
        (0x0e00 << 16) | (0x8a14 >> 2),
        (0x0e00 << 16) | (0x8a18 >> 2),
        (0x0600 << 16) | (0x30a00 >> 2),
        (0x0e00 << 16) | (0x8bf0 >> 2),
        (0x0e00 << 16) | (0x8bcc >> 2),
        (0x0e00 << 16) | (0x8b24 >> 2),
        (0x0e00 << 16) | (0x30a04 >> 2),
        (0x0600 << 16) | (0x30a10 >> 2),
        (0x0600 << 16) | (0x30a14 >> 2),
        (0x0600 << 16) | (0x30a18 >> 2),
        (0x0600 << 16) | (0x30a2c >> 2),
        (0x0e00 << 16) | (0xc700 >> 2),
        (0x0e00 << 16) | (0xc704 >> 2),
        (0x0e00 << 16) | (0xc708 >> 2),
        (0x0e00 << 16) | (0xc768 >> 2),
        (0x0400 << 16) | (0xc770 >> 2),
        (0x0400 << 16) | (0xc774 >> 2),
        (0x0400 << 16) | (0xc778 >> 2),
        (0x0400 << 16) | (0xc77c >> 2),
        (0x0400 << 16) | (0xc780 >> 2),
        (0x0400 << 16) | (0xc784 >> 2),
        (0x0400 << 16) | (0xc788 >> 2),
        (0x0400 << 16) | (0xc78c >> 2),
        (0x0400 << 16) | (0xc798 >> 2),
        (0x0400 << 16) | (0xc79c >> 2),
        (0x0400 << 16) | (0xc7a0 >> 2),
        (0x0400 << 16) | (0xc7a4 >> 2),
        (0x0400 << 16) | (0xc7a8 >> 2),
        (0x0400 << 16) | (0xc7ac >> 2),
        (0x0400 << 16) | (0xc7b0 >> 2),
        (0x0400 << 16) | (0xc7b4 >> 2),
        (0x0e00 << 16) | (0x9100 >> 2),
        (0x0e00 << 16) | (0x3c010 >> 2),
        (0x0e00 << 16) | (0x92a8 >> 2),
        (0x0e00 << 16) | (0x92ac >> 2),
        (0x0e00 << 16) | (0x92b4 >> 2),
        (0x0e00 << 16) | (0x92b8 >> 2),
        (0x0e00 << 16) | (0x92bc >> 2),
        (0x0e00 << 16) | (0x92c0 >> 2),
        (0x0e00 << 16) | (0x92c4 >> 2),
        (0x0e00 << 16) | (0x92c8 >> 2),
        (0x0e00 << 16) | (0x92cc >> 2),
        (0x0e00 << 16) | (0x92d0 >> 2),
        (0x0e00 << 16) | (0x8c00 >> 2),
        (0x0e00 << 16) | (0x8c04 >> 2),
        (0x0e00 << 16) | (0x8c20 >> 2),
        (0x0e00 << 16) | (0x8c38 >> 2),
        (0x0e00 << 16) | (0x8c3c >> 2),
        (0x0e00 << 16) | (0xae00 >> 2),
        (0x0e00 << 16) | (0x9604 >> 2),
        (0x0e00 << 16) | (0xac08 >> 2),
        (0x0e00 << 16) | (0xac0c >> 2),
        (0x0e00 << 16) | (0xac10 >> 2),
        (0x0e00 << 16) | (0xac14 >> 2),
        (0x0e00 << 16) | (0xac58 >> 2),
        (0x0e00 << 16) | (0xac68 >> 2),
        (0x0e00 << 16) | (0xac6c >> 2),
        (0x0e00 << 16) | (0xac70 >> 2),
        (0x0e00 << 16) | (0xac74 >> 2),
        (0x0e00 << 16) | (0xac78 >> 2),
        (0x0e00 << 16) | (0xac7c >> 2),
        (0x0e00 << 16) | (0xac80 >> 2),
        (0x0e00 << 16) | (0xac84 >> 2),
        (0x0e00 << 16) | (0xac88 >> 2),
        (0x0e00 << 16) | (0xac8c >> 2),
        (0x0e00 << 16) | (0x970c >> 2),
        (0x0e00 << 16) | (0x9714 >> 2),
        (0x0e00 << 16) | (0x9718 >> 2),
        (0x0e00 << 16) | (0x971c >> 2),
        (0x0e00 << 16) | (0x31068 >> 2),
        (0x4e00 << 16) | (0x31068 >> 2),
        (0x5e00 << 16) | (0x31068 >> 2),
        (0x6e00 << 16) | (0x31068 >> 2),
        (0x7e00 << 16) | (0x31068 >> 2),
        (0x8e00 << 16) | (0x31068 >> 2),
        (0x9e00 << 16) | (0x31068 >> 2),
        (0xae00 << 16) | (0x31068 >> 2),
        (0xbe00 << 16) | (0x31068 >> 2),
        (0x0e00 << 16) | (0xcd10 >> 2),
        (0x0e00 << 16) | (0xcd14 >> 2),
        (0x0e00 << 16) | (0x88b0 >> 2),
        (0x0e00 << 16) | (0x88b4 >> 2),
        (0x0e00 << 16) | (0x88b8 >> 2),
        (0x0e00 << 16) | (0x88bc >> 2),
        (0x0400 << 16) | (0x89c0 >> 2),
        (0x0e00 << 16) | (0x88c4 >> 2),
        (0x0e00 << 16) | (0x88c8 >> 2),
        (0x0e00 << 16) | (0x88d0 >> 2),
        (0x0e00 << 16) | (0x88d4 >> 2),
        (0x0e00 << 16) | (0x88d8 >> 2),
        (0x0e00 << 16) | (0x8980 >> 2),
        (0x0e00 << 16) | (0x30938 >> 2),
        (0x0e00 << 16) | (0x3093c >> 2),
        (0x0e00 << 16) | (0x30940 >> 2),
        (0x0e00 << 16) | (0x89a0 >> 2),
        (0x0e00 << 16) | (0x30900 >> 2),
        (0x0e00 << 16) | (0x30904 >> 2),
        (0x0e00 << 16) | (0x89b4 >> 2),
        (0x0e00 << 16) | (0x3c210 >> 2),
        (0x0e00 << 16) | (0x3c214 >> 2),
        (0x0e00 << 16) | (0x3c218 >> 2),
        (0x0e00 << 16) | (0x8904 >> 2),
        (0x0e00 << 16) | (0x8c28 >> 2),
        (0x0e00 << 16) | (0x8c2c >> 2),
        (0x0e00 << 16) | (0x8c30 >> 2),
        (0x0e00 << 16) | (0x8c34 >> 2),
        (0x0e00 << 16) | (0x9600 >> 2),
};
static const u32 kalindi_rlc_save_restore_register_list[] =
{
        (0x0e00 << 16) | (0xc12c >> 2),
        (0x0e00 << 16) | (0xc140 >> 2),
        (0x0e00 << 16) | (0xc150 >> 2),
        (0x0e00 << 16) | (0xc15c >> 2),
        (0x0e00 << 16) | (0xc168 >> 2),
        (0x0e00 << 16) | (0xc170 >> 2),
        (0x0e00 << 16) | (0xc204 >> 2),
        (0x0e00 << 16) | (0xc2b4 >> 2),
        (0x0e00 << 16) | (0xc2b8 >> 2),
        (0x0e00 << 16) | (0xc2bc >> 2),
        (0x0e00 << 16) | (0xc2c0 >> 2),
        (0x0e00 << 16) | (0x8228 >> 2),
        (0x0e00 << 16) | (0x829c >> 2),
        (0x0e00 << 16) | (0x869c >> 2),
        (0x0600 << 16) | (0x98f4 >> 2),
        (0x0e00 << 16) | (0x98f8 >> 2),
        (0x0e00 << 16) | (0x9900 >> 2),
        (0x0e00 << 16) | (0xc260 >> 2),
        (0x0e00 << 16) | (0x90e8 >> 2),
        (0x0e00 << 16) | (0x3c000 >> 2),
        (0x0e00 << 16) | (0x3c00c >> 2),
        (0x0e00 << 16) | (0x8c1c >> 2),
        (0x0e00 << 16) | (0x9700 >> 2),
        (0x0e00 << 16) | (0xcd20 >> 2),
        (0x4e00 << 16) | (0xcd20 >> 2),
        (0x5e00 << 16) | (0xcd20 >> 2),
        (0x6e00 << 16) | (0xcd20 >> 2),
        (0x7e00 << 16) | (0xcd20 >> 2),
        (0x0e00 << 16) | (0x89bc >> 2),
        (0x0e00 << 16) | (0x8900 >> 2),
        (0x0e00 << 16) | (0xc130 >> 2),
        (0x0e00 << 16) | (0xc134 >> 2),
        (0x0e00 << 16) | (0xc1fc >> 2),
        (0x0e00 << 16) | (0xc208 >> 2),
        (0x0e00 << 16) | (0xc264 >> 2),
        (0x0e00 << 16) | (0xc268 >> 2),
        (0x0e00 << 16) | (0xc26c >> 2),
        (0x0e00 << 16) | (0xc270 >> 2),
        (0x0e00 << 16) | (0xc274 >> 2),
        (0x0e00 << 16) | (0xc28c >> 2),
        (0x0e00 << 16) | (0xc290 >> 2),
        (0x0e00 << 16) | (0xc294 >> 2),
        (0x0e00 << 16) | (0xc298 >> 2),
        (0x0e00 << 16) | (0xc2a0 >> 2),
        (0x0e00 << 16) | (0xc2a4 >> 2),
        (0x0e00 << 16) | (0xc2a8 >> 2),
        (0x0e00 << 16) | (0xc2ac >> 2),
        (0x0e00 << 16) | (0x301d0 >> 2),
        (0x0e00 << 16) | (0x30238 >> 2),
        (0x0e00 << 16) | (0x30250 >> 2),
        (0x0e00 << 16) | (0x30254 >> 2),
        (0x0e00 << 16) | (0x30258 >> 2),
        (0x0e00 << 16) | (0x3025c >> 2),
        (0x4e00 << 16) | (0xc900 >> 2),
        (0x5e00 << 16) | (0xc900 >> 2),
        (0x6e00 << 16) | (0xc900 >> 2),
        (0x7e00 << 16) | (0xc900 >> 2),
        (0x4e00 << 16) | (0xc904 >> 2),
        (0x5e00 << 16) | (0xc904 >> 2),
        (0x6e00 << 16) | (0xc904 >> 2),
        (0x7e00 << 16) | (0xc904 >> 2),
        (0x4e00 << 16) | (0xc908 >> 2),
        (0x5e00 << 16) | (0xc908 >> 2),
        (0x6e00 << 16) | (0xc908 >> 2),
        (0x7e00 << 16) | (0xc908 >> 2),
        (0x4e00 << 16) | (0xc90c >> 2),
        (0x5e00 << 16) | (0xc90c >> 2),
        (0x6e00 << 16) | (0xc90c >> 2),
        (0x7e00 << 16) | (0xc90c >> 2),
        (0x4e00 << 16) | (0xc910 >> 2),
        (0x5e00 << 16) | (0xc910 >> 2),
        (0x6e00 << 16) | (0xc910 >> 2),
        (0x7e00 << 16) | (0xc910 >> 2),
        (0x0e00 << 16) | (0xc99c >> 2),
        (0x0e00 << 16) | (0x9834 >> 2),
        (0x0000 << 16) | (0x30f00 >> 2),
        (0x0000 << 16) | (0x30f04 >> 2),
        (0x0000 << 16) | (0x30f08 >> 2),
        (0x0000 << 16) | (0x30f0c >> 2),
        (0x0600 << 16) | (0x9b7c >> 2),
        (0x0e00 << 16) | (0x8a14 >> 2),
        (0x0e00 << 16) | (0x8a18 >> 2),
        (0x0600 << 16) | (0x30a00 >> 2),
        (0x0e00 << 16) | (0x8bf0 >> 2),
        (0x0e00 << 16) | (0x8bcc >> 2),
        (0x0e00 << 16) | (0x8b24 >> 2),
        (0x0e00 << 16) | (0x30a04 >> 2),
        (0x0600 << 16) | (0x30a10 >> 2),
        (0x0600 << 16) | (0x30a14 >> 2),
        (0x0600 << 16) | (0x30a18 >> 2),
        (0x0600 << 16) | (0x30a2c >> 2),
        (0x0e00 << 16) | (0xc700 >> 2),
        (0x0e00 << 16) | (0xc704 >> 2),
        (0x0e00 << 16) | (0xc708 >> 2),
        (0x0e00 << 16) | (0xc768 >> 2),
        (0x0400 << 16) | (0xc770 >> 2),
        (0x0400 << 16) | (0xc774 >> 2),
        (0x0400 << 16) | (0xc798 >> 2),
        (0x0400 << 16) | (0xc79c >> 2),
        (0x0e00 << 16) | (0x9100 >> 2),
        (0x0e00 << 16) | (0x3c010 >> 2),
        (0x0e00 << 16) | (0x8c00 >> 2),
        (0x0e00 << 16) | (0x8c04 >> 2),
        (0x0e00 << 16) | (0x8c20 >> 2),
        (0x0e00 << 16) | (0x8c38 >> 2),
        (0x0e00 << 16) | (0x8c3c >> 2),
        (0x0e00 << 16) | (0xae00 >> 2),
        (0x0e00 << 16) | (0x9604 >> 2),
        (0x0e00 << 16) | (0xac08 >> 2),
        (0x0e00 << 16) | (0xac0c >> 2),
        (0x0e00 << 16) | (0xac10 >> 2),
        (0x0e00 << 16) | (0xac14 >> 2),
        (0x0e00 << 16) | (0xac58 >> 2),
        (0x0e00 << 16) | (0xac68 >> 2),
        (0x0e00 << 16) | (0xac6c >> 2),
        (0x0e00 << 16) | (0xac70 >> 2),
        (0x0e00 << 16) | (0xac74 >> 2),
        (0x0e00 << 16) | (0xac78 >> 2),
        (0x0e00 << 16) | (0xac7c >> 2),
        (0x0e00 << 16) | (0xac80 >> 2),
        (0x0e00 << 16) | (0xac84 >> 2),
        (0x0e00 << 16) | (0xac88 >> 2),
        (0x0e00 << 16) | (0xac8c >> 2),
        (0x0e00 << 16) | (0x970c >> 2),
        (0x0e00 << 16) | (0x9714 >> 2),
        (0x0e00 << 16) | (0x9718 >> 2),
        (0x0e00 << 16) | (0x971c >> 2),
        (0x0e00 << 16) | (0x31068 >> 2),
        (0x4e00 << 16) | (0x31068 >> 2),
        (0x5e00 << 16) | (0x31068 >> 2),
        (0x6e00 << 16) | (0x31068 >> 2),
        (0x7e00 << 16) | (0x31068 >> 2),
        (0x0e00 << 16) | (0xcd10 >> 2),
        (0x0e00 << 16) | (0xcd14 >> 2),
        (0x0e00 << 16) | (0x88b0 >> 2),
        (0x0e00 << 16) | (0x88b4 >> 2),
        (0x0e00 << 16) | (0x88b8 >> 2),
        (0x0e00 << 16) | (0x88bc >> 2),
        (0x0400 << 16) | (0x89c0 >> 2),
        (0x0e00 << 16) | (0x88c4 >> 2),
        (0x0e00 << 16) | (0x88c8 >> 2),
        (0x0e00 << 16) | (0x88d0 >> 2),
        (0x0e00 << 16) | (0x88d4 >> 2),
        (0x0e00 << 16) | (0x88d8 >> 2),
        (0x0e00 << 16) | (0x8980 >> 2),
        (0x0e00 << 16) | (0x30938 >> 2),
        (0x0e00 << 16) | (0x3093c >> 2),
        (0x0e00 << 16) | (0x30940 >> 2),
        (0x0e00 << 16) | (0x89a0 >> 2),
        (0x0e00 << 16) | (0x30900 >> 2),
        (0x0e00 << 16) | (0x30904 >> 2),
        (0x0e00 << 16) | (0x89b4 >> 2),
        (0x0e00 << 16) | (0x3e1fc >> 2),
        (0x0e00 << 16) | (0x3c210 >> 2),
        (0x0e00 << 16) | (0x3c214 >> 2),
        (0x0e00 << 16) | (0x3c218 >> 2),
        (0x0e00 << 16) | (0x8904 >> 2),
        (0x0e00 << 16) | (0x8c28 >> 2),
        (0x0e00 << 16) | (0x8c2c >> 2),
        (0x0e00 << 16) | (0x8c30 >> 2),
        (0x0e00 << 16) | (0x8c34 >> 2),
        (0x0e00 << 16) | (0x9600 >> 2),
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
                chip_name = "kaveri";
                break;
        case CHIP_KABINI:
                chip_name = "kabini";
                break;
        case CHIP_MULLINS:
                chip_name = "mullins";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;

        if (adev->asic_type == CHIP_KAVERI) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
                err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
        }

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
        if (err) {
                printk(KERN_ERR
                       "gfx7: Failed to load firmware \"%s\"\n",
                       fw_name);
                release_firmware(adev->gfx.pfp_fw);
                adev->gfx.pfp_fw = NULL;
                release_firmware(adev->gfx.me_fw);
                adev->gfx.me_fw = NULL;
                release_firmware(adev->gfx.ce_fw);
                adev->gfx.ce_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
                adev->gfx.mec2_fw = NULL;
                release_firmware(adev->gfx.rlc_fw);
                adev->gfx.rlc_fw = NULL;
        }
        return err;
}
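
/*
 * Illustrative sketch (editor addition, not part of the original driver):
 * the usual calling pattern for the loader above, as it would appear in an
 * early init callback.  The surrounding context and variable names here are
 * hypothetical.
 */
#if 0
        r = gfx_v7_0_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load gfx firmware!\n");
                return r;       /* any partially loaded images were already released above */
        }
#endif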
/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
        const u32 num_tile_mode_states =
                        ARRAY_SIZE(adev->gfx.config.tile_mode_array);
        const u32 num_secondary_tile_mode_states =
                        ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
        u32 reg_offset, split_equal_to_row_size;
        uint32_t *tile, *macrotile;

        tile = adev->gfx.config.tile_mode_array;
        macrotile = adev->gfx.config.macrotile_mode_array;

        switch (adev->gfx.config.mem_row_size_in_kb) {
        case 1:
                split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
                break;
        case 2:
        default:
                split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
                break;
        case 4:
                split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
                break;
        }

        for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
                tile[reg_offset] = 0;
        for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
                macrotile[reg_offset] = 0;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[7] = (TILE_SPLIT(split_equal_to_row_size));
                tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | PIPE_CONFIG(ADDR_SURF_P4_16x16));
                tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
                tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[12] = (TILE_SPLIT(split_equal_to_row_size));
                tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
                tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[17] = (TILE_SPLIT(split_equal_to_row_size));
                tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
                tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[23] = (TILE_SPLIT(split_equal_to_row_size));
                tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
                tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[30] = (TILE_SPLIT(split_equal_to_row_size));

                macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_4_BANK));
                macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_4_BANK));
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
                        WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
                for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
                        if (reg_offset != 7)
                                WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
                break;
        case CHIP_HAWAII:
                tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
                tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
                tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
                tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
                tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
                tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

                macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_4_BANK));
                macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_4_BANK));
                macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_4_BANK));
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
                        WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
                for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
                        if (reg_offset != 7)
                                WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        default:
                tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
                tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | TILE_SPLIT(split_equal_to_row_size));
                tile[7] = (TILE_SPLIT(split_equal_to_row_size));
                tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | PIPE_CONFIG(ADDR_SURF_P2));
                tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
                tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[12] = (TILE_SPLIT(split_equal_to_row_size));
                tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
                tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[17] = (TILE_SPLIT(split_equal_to_row_size));
                tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
                tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[23] = (TILE_SPLIT(split_equal_to_row_size));
                tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
                tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
                tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
                tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
                tile[30] = (TILE_SPLIT(split_equal_to_row_size));

                macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | NUM_BANKS(ADDR_SURF_16_BANK));
                macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                        MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_8_BANK));
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
                        WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
                for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
                        if (reg_offset != 7)
                                WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
                break;
        }
}
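
/*
 * Illustrative sketch (editor addition, not part of the original driver): as
 * the comment above gfx_v7_0_tiling_mode_table_init() explains, CIK surfaces
 * do not carry full tiling parameters; they reference an index into the
 * 32-entry table programmed above, which sits at mmGB_TILE_MODE0.  The local
 * names below are hypothetical.
 */
#if 0
        u32 index = 10;                                 /* e.g. a displayable 2D tiled mode */
        u32 mode = RREG32(mmGB_TILE_MODE0 + index);     /* packed ARRAY_MODE/PIPE_CONFIG/... fields */
#endif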
/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH.  0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
{
        u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;

        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
                data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                        GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
        else if (se_num == 0xffffffff)
                data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
                        (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
        else if (sh_num == 0xffffffff)
                data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                        (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
        else
                data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
                        (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
        WREG32(mmGRBM_GFX_INDEX, data);
}
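
/*
 * Illustrative sketch (editor addition, not part of the original driver):
 * the helper above is typically used to narrow GRBM register access to one
 * SE/SH instance and then restore broadcast mode, always under
 * grbm_idx_mutex, as gfx_v7_0_setup_rb() does further down.  The local
 * names below are hypothetical.
 */
#if 0
        mutex_lock(&adev->grbm_idx_mutex);
        gfx_v7_0_select_se_sh(adev, se, sh);                    /* address one SE/SH pair */
        data = RREG32(mmCC_RB_BACKEND_DISABLE);                 /* per-instance register */
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);    /* back to broadcast */
        mutex_unlock(&adev->grbm_idx_mutex);
#endif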
/**
 * gfx_v7_0_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * create a variable length bit mask (CIK).
 * Returns the bitmask.
 */
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
        return (u32)((1ULL << bit_width) - 1);
}
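
/*
 * Worked example (editor addition): gfx_v7_0_create_bitmask(4) evaluates to
 * ((1ULL << 4) - 1) = 0xf, i.e. the four low bits set.  The 64-bit
 * intermediate keeps bit_width == 32 from shifting a 32-bit 1 out of range.
 */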
/**
 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
 *
 * @adev: amdgpu_device pointer
 *
 * Calculates the bitmask of enabled RBs (CIK).
 * Returns the enabled RB bitmask.
 */
static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
        u32 data, mask;

        data = RREG32(mmCC_RB_BACKEND_DISABLE);
        data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

        data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
        data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

        mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
                                       adev->gfx.config.max_sh_per_se);

        return (~data) & mask;
}
/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
        int i, j;
        u32 data;
        u32 active_rbs = 0;
        u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
                                     adev->gfx.config.max_sh_per_se;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        gfx_v7_0_select_se_sh(adev, i, j);
                        data = gfx_v7_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);

        adev->gfx.config.backend_enable_mask = active_rbs;
        adev->gfx.config.num_rbs = hweight32(active_rbs);
}
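
/*
 * Worked example (editor addition): with 2 SEs, 1 SH per SE and 2 RBs per
 * SH, rb_bitmap_width_per_sh is 2, so SE0 contributes bits [1:0] and SE1
 * bits [3:2] of active_rbs; four enabled RBs give backend_enable_mask 0xf
 * and num_rbs = hweight32(0xf) = 4.
 */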
/**
 * gmc_v7_0_init_compute_vmid - init the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;

        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
         */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
        sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
        sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
        mutex_lock(&adev->srbm_mutex);
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                cik_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_config);
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_MEM_BASES, sh_mem_bases);
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
}
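
/*
 * Worked example (editor addition): DEFAULT_SH_MEM_BASES is 0x6000, so
 * sh_mem_bases = 0x6000 | (0x6000 << 16) = 0x60006000, filling both base
 * fields of SH_MEM_BASES for compute VMIDs 8-15; per the aperture comment
 * above, this places their LDS and scratch apertures in the
 * 0x60000000'00000000 region.
 */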
/**
 * gfx_v7_0_gpu_init - setup the 3D engine
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
        u32 tmp, sh_mem_cfg;
        int i;

        WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));

        WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

        gfx_v7_0_tiling_mode_table_init(adev);

        gfx_v7_0_setup_rb(adev);

        /* set HW defaults for 3D engine */
        WREG32(mmCP_MEQ_THRESHOLDS,
               (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
               (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

        mutex_lock(&adev->grbm_idx_mutex);
        /*
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                   SH_MEM_ALIGNMENT_MODE_UNALIGNED);

        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < 16; i++) {
                cik_srbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
                WREG32(mmSH_MEM_APE1_BASE, 1);
                WREG32(mmSH_MEM_APE1_LIMIT, 0);
                WREG32(mmSH_MEM_BASES, 0);
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        gmc_v7_0_init_compute_vmid(adev);

        WREG32(mmSX_DEBUG_1, 0x20);

        WREG32(mmTA_CNTL_AUX, 0x00010000);

        tmp = RREG32(mmSPI_CONFIG_CNTL);
        WREG32(mmSPI_CONFIG_CNTL, tmp);

        WREG32(mmSQ_CONFIG, 1);

        WREG32(mmDB_DEBUG, 0);

        tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
        WREG32(mmDB_DEBUG2, tmp);

        tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
        WREG32(mmDB_DEBUG3, tmp);

        tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
        WREG32(mmCB_HW_CONTROL, tmp);

        WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

        WREG32(mmPA_SC_FIFO_SIZE,
               ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
                (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
                (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
                (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

        WREG32(mmVGT_NUM_INSTANCES, 1);

        WREG32(mmCP_PERFMON_CNTL, 0);

        WREG32(mmSQ_CONFIG, 0);

        WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
               ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
                (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

        WREG32(mmVGT_CACHE_INVALIDATION,
               (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
               (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

        WREG32(mmVGT_GS_VERTEX_REUSE, 16);
        WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

        WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
               (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
        WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
        mutex_unlock(&adev->grbm_idx_mutex);
}
/*
 * GPU scratch registers helpers function.
 */
/**
 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
 * is not used by default on newer asics (r6xx+).  On newer asics,
 * memory buffers are used for fences rather than scratch regs.
 */
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
        int i;

        adev->gfx.scratch.num_reg = 7;
        adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
        for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
                adev->gfx.scratch.free[i] = true;
                adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
        }
}
/**
 * gfx_v7_0_ring_test_ring - basic gfx ring test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume();
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
                return r;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }
        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
                          ring->idx, scratch, tmp);
                r = -EINVAL;
        }
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}
/**
 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
 *
 * @adev: amdgpu_device pointer
 * @ridx: amdgpu ring index
 *
 * Emits an hdp flush on the cp.
 */
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask;
	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}
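/*
 * Note: the WAIT_REG_MEM above is used in its "write, then wait" form
 * (per the inline comment): the CP writes ref_and_mask to
 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE every 0x20 clocks
 * until the masked bits compare equal, i.e. until the HDP flush for this
 * engine has completed.
 */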
/**
 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
 *
 * @adev: amdgpu_device pointer
 * @fence: amdgpu fence object
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with seq one below.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe. */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
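/*
 * EVENT_WRITE_EOP encoding as used above: DATA_SEL selects what the CP
 * writes back at 'addr' (1 = 32-bit seq, 2 = 64-bit seq) and INT_SEL
 * selects whether an interrupt is raised once the data has been written
 * (0 = none, 2 = interrupt after the write). This describes how the
 * flags are used here; see the CIK PM4 documentation for the
 * authoritative field definitions.
 */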
/**
 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
 *
 * @adev: amdgpu_device pointer
 * @fence: amdgpu fence object
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
/**
 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: amdgpu indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring. IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring. This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib)
{
	bool need_ctx_switch = ring->current_ctx != ib->ctx;
	u32 header, control = 0;
	u32 next_rptr = ring->wptr + 5;

	/* drop the CE preamble IB for the same context */
	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
		return;

	if (need_ctx_switch)
		next_rptr += 2;

	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
	if (need_ctx_switch) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (ib->vm_id << 24);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
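/*
 * The INDIRECT_BUFFER{,_CONST} packet built above takes the IB GPU
 * address (which must be 4-byte aligned, hence the 0xFFFFFFFC mask)
 * followed by a control word that packs the IB size in dwords in the low
 * bits and the VMID to execute under in bits 24+, matching how 'control'
 * is assembled here.
 */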
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib)
{
	u32 header, control = 0;
	u32 next_rptr = ring->wptr + 5;

	control |= INDIRECT_BUFFER_VALID;
	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (ib->vm_id << 24);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
/**
 * gfx_v7_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
			       NULL, &f);
	if (r)
		goto err2;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err2;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}

err2:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs. The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
/**
 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the gfx MEs.
 */
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_ME_CNTL, 0);
	} else {
		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
				      CP_ME_CNTL__PFP_HALT_MASK |
				      CP_ME_CNTL__CE_HALT_MASK));
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
}
/**
 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);

	gfx_v7_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}
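/*
 * The microcode upload above follows the usual CIK ADDR/DATA idiom: reset
 * the write address to 0, stream the ucode dwords through the DATA
 * register (the address auto-increments), then leave the firmware version
 * in the ADDR register as the final write, as done here.
 */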
/**
 * gfx_v7_0_cp_gfx_start - start the gfx ring
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v7_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_ring_write(ring, 0x16000012);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_KAVERI:
		amdgpu_ring_write(ring, 0x00000000); /* XXX */
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		amdgpu_ring_write(ring, 0x00000000); /* XXX */
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_HAWAII:
		amdgpu_ring_write(ring, 0x3a00161a);
		amdgpu_ring_write(ring, 0x0000002e);
		break;
	default:
		amdgpu_ring_write(ring, 0x00000000);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, 0x00000316);
	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	amdgpu_ring_commit(ring);

	return 0;
}
/**
 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr;
	int r;

	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
	if (adev->asic_type != CHIP_HAWAII)
		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	WREG32(mmSCRATCH_ADDR, 0);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(mmSCRATCH_UMSK, 0);

	WREG32(mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	/* start the ring */
	gfx_v7_0_cp_gfx_start(adev);
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	return 0;
}
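/*
 * CP_RB0_CNTL encoding used above: the low bits hold log2 of the ring
 * size in qwords (order_base_2(ring_size / 8)) and bits 8+ hold the
 * read-pointer writeback block size, derived here from the GPU page
 * size. Writing CNTL with RB_RPTR_WR_ENA set while WPTR is programmed
 * lets the driver reset the ring pointers before the final CNTL value
 * is restored.
 */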
static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmCP_RB0_WPTR);
}

static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmCP_RB0_WPTR, ring->wptr);
	(void)RREG32(mmCP_RB0_WPTR);
}

static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->wptr_offs];
}

static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	adev->wb.wb[ring->wptr_offs] = ring->wptr;
	WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the compute MEs.
 */
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
	}
}
/**
 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(
					mec_hdr->ucode_feature_version);

	gfx_v7_0_cp_compute_enable(adev, false);

	/* MEC1 */
	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);

	if (adev->asic_type == CHIP_KAVERI) {
		const struct gfx_firmware_header_v1_0 *mec2_hdr;

		if (!adev->gfx.mec2_fw)
			return -EINVAL;

		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version = le32_to_cpu(
				mec2_hdr->ucode_feature_version);

		/* MEC2 */
		fw_data = (const __le32 *)
			(adev->gfx.mec2_fw->data +
			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}
/**
 * gfx_v7_0_cp_compute_fini - stop the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj) {
			r = amdgpu_bo_reserve(ring->mqd_obj, false);
			if (unlikely(r != 0))
				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);

			amdgpu_bo_unpin(ring->mqd_obj);
			amdgpu_bo_unreserve(ring->mqd_obj);

			amdgpu_bo_unref(&ring->mqd_obj);
			ring->mqd_obj = NULL;
		}
	}
}

static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
}
#define MEC_HPD_SIZE 2048

static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;

	/*
	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
	 * Nonetheless, we assign only 1 pipe because all other pipes
	 * will be handled by KFD.
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}

	/* clear memory.  Not sure if this is required or not */
	memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}
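/*
 * The HPD EOP buffer allocated above provides MEC_HPD_SIZE * 2 bytes of
 * end-of-pipe storage per pipe (num_mec * num_pipe pipes); each pipe's
 * slice is pointed at CP_HPD_EOP_BASE_ADDR in
 * gfx_v7_0_cp_compute_resume() below.
 */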
struct hqd_registers
{
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};

struct bonaire_mqd
{
	u32 header;
	u32 dispatch_initiator;
	u32 dimensions[3];
	u32 start_idx[3];
	u32 num_threads[3];
	u32 pipeline_stat_enable;
	u32 perf_counter_enable;
	u32 pgm[2];
	u32 tba[2];
	u32 tma[2];
	u32 pgm_rsrc[2];
	u32 vmid;
	u32 resource_limits;
	u32 static_thread_mgmt01[2];
	u32 tmp_ring_size;
	u32 static_thread_mgmt23[2];
	u32 restart[3];
	u32 thread_trace_enable;
	u32 reserved1;
	u32 user_data[16];
	u32 vgtcs_invoke_count[2];
	struct hqd_registers queue_state;
	u32 dequeued;
	u32 interrupt_queue[64];
};
/**
 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
{
	int r, i, j;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct bonaire_mqd *mqd;

	gfx_v7_0_cp_compute_enable(adev, true);

	/* fix up chicken bits */
	tmp = RREG32(mmCP_CPF_DEBUG);
	tmp |= (1 << 23);
	WREG32(mmCP_CPF_DEBUG, tmp);

	/* init the pipes */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
		int me = (i < 4) ? 1 : 2;
		int pipe = (i < 4) ? i : (i - 4);

		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);

		cik_srbm_select(adev, me, pipe, 0, 0);

		/* write the EOP addr */
		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);

		/* set the VMID assigned */
		WREG32(mmCP_HPD_EOP_VMID, 0);

		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
		tmp |= order_base_2(MEC_HPD_SIZE / 8);
		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* init the queues.  Just two for now. */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj == NULL) {
			r = amdgpu_bo_create(adev,
					     sizeof(struct bonaire_mqd),
					     PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
					     &ring->mqd_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &mqd_gpu_addr);
		if (r) {
			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
		if (r) {
			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}

		/* init the mqd struct */
		memset(buf, 0, sizeof(struct bonaire_mqd));

		mqd = (struct bonaire_mqd *)buf;
		mqd->header = 0xC0310800;
		mqd->static_thread_mgmt01[0] = 0xffffffff;
		mqd->static_thread_mgmt01[1] = 0xffffffff;
		mqd->static_thread_mgmt23[0] = 0xffffffff;
		mqd->static_thread_mgmt23[1] = 0xffffffff;

		mutex_lock(&adev->srbm_mutex);
		cik_srbm_select(adev, ring->me,
				ring->pipe,
				ring->queue, 0);

		/* disable wptr polling */
		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);

		/* enable doorbell? */
		mqd->queue_state.cp_hqd_pq_doorbell_control =
			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		if (use_doorbell)
			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		else
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* disable the queue if it's active */
		mqd->queue_state.cp_hqd_dequeue_request = 0;
		mqd->queue_state.cp_hqd_pq_rptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = 0;
		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
					break;
				udelay(1);
			}
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		}

		/* set the pointer to the MQD */
		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
		/* set MQD vmid to 0 */
		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);

		/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
		hqd_gpu_addr = ring->gpu_addr >> 8;
		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);

		/* set up the HQD, this is similar to CP_RB0_CNTL */
		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
			  CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);

		mqd->queue_state.cp_hqd_pq_control |=
			order_base_2(ring->ring_size / 8);
		mqd->queue_state.cp_hqd_pq_control |=
			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
		mqd->queue_state.cp_hqd_pq_control |=
			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
#endif
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
			  CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
			  CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
		mqd->queue_state.cp_hqd_pq_control |=
			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);

		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);

		/* set the wb address whether it's enabled or not */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
			upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);

		/* enable the doorbell if requested */
		if (use_doorbell) {
			mqd->queue_state.cp_hqd_pq_doorbell_control =
				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				(ring->doorbell_index <<
				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
				  CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
		} else {
			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
		}
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
		ring->wptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

		/* set the vmid for the queue */
		mqd->queue_state.cp_hqd_vmid = 0;
		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);

		/* activate the queue */
		mqd->queue_state.cp_hqd_active = 1;
		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);

		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		amdgpu_bo_kunmap(ring->mqd_obj);
		amdgpu_bo_unreserve(ring->mqd_obj);

		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	return 0;
}
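/*
 * Note on the queue setup above: the bonaire_mqd (memory queue
 * descriptor) is the in-memory image of the HQD registers for a queue.
 * The driver fills the MQD, points CP_MQD_BASE_ADDR at it, and then
 * mirrors the same values into the live CP_HQD_* registers for the
 * pipe/queue selected via cik_srbm_select(), so the hardware scheduler
 * and the register state stay consistent.
 */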
static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_cp_gfx_enable(adev, enable);
	gfx_v7_0_cp_compute_enable(adev, enable);
}

static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
{
	int r;

	r = gfx_v7_0_cp_gfx_load_microcode(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_load_microcode(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	else
		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v7_0_cp_load_microcode(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_gfx_resume(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_resume(adev);
	if (r)
		return r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
 *
 * @adev: amdgpu_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
 */
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */

	if (usepfp) {
		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	if (vm_id < 8) {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);

		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}
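/*
 * CIK exposes 16 VM contexts: the page table base for contexts 0-7 lives
 * at VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id, while contexts 8-15 use a
 * second register block starting at VM_CONTEXT8_PAGE_TABLE_BASE_ADDR,
 * which is why the flush above picks the register by vm_id range.
 */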
/*
 * RLC
 * The RLC is a multi-purpose microengine that handles a
 * variety of functions.
 */
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
	int r;

	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
		adev->gfx.rlc.save_restore_obj = NULL;
	}

	/* clear state block */
	if (adev->gfx.rlc.clear_state_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
		adev->gfx.rlc.clear_state_obj = NULL;
	}

	/* cp table block */
	if (adev->gfx.rlc.cp_table_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
		adev->gfx.rlc.cp_table_obj = NULL;
	}
}
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws, i;
	const struct cs_section_def *cs_data;
	int r;

	/* allocate rlc buffers */
	if (adev->flags & AMD_IS_APU) {
		if (adev->asic_type == CHIP_KAVERI) {
			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
		} else {
			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
		}
	}
	adev->gfx.rlc.cs_data = ci_cs_data;
	adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;

	src_ptr = adev->gfx.rlc.reg_list;
	dws = adev->gfx.rlc.reg_list_size;
	dws += (5 * 16) + 48 + 48 + 64;

	cs_data = adev->gfx.rlc.cs_data;

	if (src_ptr) {
		/* save restore block */
		if (adev->gfx.rlc.save_restore_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.save_restore_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.save_restore_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);

		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.clear_state_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.clear_state_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->gfx.rlc.cp_table_size) {
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.cp_table_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0)) {
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.cp_table_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		gfx_v7_0_init_cp_pg_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}
static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	tmp = RREG32(mmRLC_LB_CNTL);
	if (enable)
		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	else
		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	WREG32(mmRLC_LB_CNTL, tmp);
}

static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(mmRLC_CNTL);
	if (tmp != rlc)
		WREG32(mmRLC_CNTL, rlc);
}

static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_CNTL);

	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		u32 i;

		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
		WREG32(mmRLC_CNTL, data);

		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
				break;
			udelay(1);
		}

		gfx_v7_0_wait_for_rlc_serdes(adev);
	}

	return orig;
}

void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp, i, mask;

	tmp = 0x1 | (1 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);

	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
			break;
		udelay(1);
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
			break;
		udelay(1);
	}
}

void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = 0x1 | (0 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);
}
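/*
 * RLC_GPR_REG2 above is used as a simple request mailbox to the RLC
 * firmware: bit 0 is the request flag and bit 1 appears to select enter
 * (1) vs. exit (0) of the safe mode; the enter path then waits for the
 * RLC to report gfx power/clock status and to clear the request bit.
 * This reading is inferred from the register usage here rather than
 * from public documentation.
 */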
/**
 * gfx_v7_0_rlc_stop - stop the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK).
 */
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, 0);

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	gfx_v7_0_wait_for_rlc_serdes(adev);
}

/**
 * gfx_v7_0_rlc_start - start the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Unhalt the RLC ME (MicroEngine) (CIK).
 */
static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmGRBM_SOFT_RESET);

	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
}
/**
 * gfx_v7_0_rlc_resume - setup the RLC hw
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);
	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(
					hdr->ucode_feature_version);

	gfx_v7_0_rlc_stop(adev);

	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);

	gfx_v7_0_rlc_reset(adev);

	gfx_v7_0_init_pg(adev);

	WREG32(mmRLC_LB_CNTR_INIT, 0);
	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);

	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(mmRLC_LB_PARAMS, 0x00600408);
	WREG32(mmRLC_LB_CNTL, 0x80000004);
	mutex_unlock(&adev->grbm_idx_mutex);

	WREG32(mmRLC_MC_CNTL, 0);
	WREG32(mmRLC_UCODE_CNTL, 0);

	fw_data = (const __le32 *)
		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	/* XXX - find out what chips support lbpw */
	gfx_v7_0_enable_lbpw(adev, false);

	if (adev->asic_type == CHIP_BONAIRE)
		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);

	gfx_v7_0_rlc_start(adev);

	return 0;
}
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
	} else {
		gfx_v7_0_enable_gui_idle_interrupt(adev, false);

		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
	}

	if (orig != data)
		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (orig != data)
					WREG32(mmCP_MEM_SLP_CNTL, data);
			}
		}

		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data &= 0xfffffffd;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (orig != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
	} else {
		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);
	}
}
static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (enable) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
}
static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
		data &= ~0x2000;
	else
		data |= 0x2000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_KAVERI)
		max_me = 5;

	if (adev->gfx.rlc.cp_table_ptr == NULL)
		return;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
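/*
 * The loop above copies the jump table (jt_offset/jt_size dwords) out of
 * each CP engine's firmware image (CE, PFP, ME, MEC, and MEC2 on Kaveri)
 * into one contiguous cp_table buffer. RLC_JUMP_TABLE_RESTORE is later
 * pointed at this buffer in gfx_v7_0_init_gfx_cgpg() so that the CP
 * microcode state can be restored around power gating transitions.
 */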
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 data, orig;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);
	} else {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);

		data = RREG32(mmDB_RENDER_CONTROL);
	}
}
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
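/*
 * The fuse registers record *inactive* CUs, so the active bitmap is the
 * bitwise complement clipped to the CUs that can exist in one shader
 * array.  Assuming gfx_v7_0_create_bitmask(n) returns the low n bits set
 * (e.g. n = 7 -> 0x7f), an INACTIVE_CUS value of 0x03 on such a part
 * would yield an active bitmap of 0x7c, i.e. CUs 2-6 usable and CUs 0-1
 * fused off.
 */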
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
	uint32_t tmp, active_cu_number;
	struct amdgpu_cu_info cu_info;

	gfx_v7_0_get_cu_info(adev, &cu_info);
	tmp = cu_info.ao_cu_mask;
	active_cu_number = cu_info.number;

	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);

	tmp = RREG32(mmRLC_MAX_PG_CU);
	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
	tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
	WREG32(mmRLC_MAX_PG_CU, tmp);
}
static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
					    bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D

static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
	u32 data, orig;
	u32 i;

	if (adev->gfx.rlc.cs_data) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
	} else {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		for (i = 0; i < 3; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
	}
	if (adev->gfx.rlc.reg_list) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
	}

	orig = data = RREG32(mmRLC_PG_CNTL);
	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);

	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);

	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);

	data = 0x10101010;
	WREG32(mmRLC_PG_DELAY, data);

	data = RREG32(mmRLC_PG_DELAY_2);
	data &= ~0xff;
	data |= 0x3;
	WREG32(mmRLC_PG_DELAY_2, data);

	data = RREG32(mmRLC_AUTO_PG_CTRL);
	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
	WREG32(mmRLC_AUTO_PG_CTRL, data);
}
static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_enable_gfx_cgpg(adev, enable);
	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
}
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return 0;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
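/*
 * Worked example of the size computation above (the exact total depends
 * on the clearstate tables in clearstate_ci.h, so treat the numbers as
 * illustrative): 2 dwords for the clear-state preamble, 3 for context
 * control, (2 + reg_count) for every SECT_CONTEXT extent, 4 for the two
 * PA_SC_RASTER_CONFIG writes, 2 to end the clear state and 2 for the
 * final CLEAR_STATE packet - matching the packets emitted by
 * gfx_v7_0_get_csb_buffer() below.
 */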
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		buffer[count++] = cpu_to_le32(0x16000012);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KAVERI:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_HAWAII:
		buffer[count++] = cpu_to_le32(0x3a00161a);
		buffer[count++] = cpu_to_le32(0x0000002e);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_init_gfx_cgpg(adev);
			gfx_v7_0_enable_cp_pg(adev, true);
			gfx_v7_0_enable_gds_pg(adev, true);
		}
		gfx_v7_0_init_ao_cu_mask(adev);
		gfx_v7_0_update_gfx_pg(adev, true);
	}
}

static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, false);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, false);
			gfx_v7_0_enable_gds_pg(adev, false);
		}
	}
}
/**
 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
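/*
 * Usage sketch (hypothetical caller, not part of this file): a profiler
 * can take two snapshots and difference them to measure elapsed GPU
 * clocks; the mutex plus the RLC_CAPTURE_GPU_CLOCK_COUNT latch keep each
 * 64-bit read consistent with respect to other callers.
 *
 *	uint64_t t0 = gfx_v7_0_get_gpu_clock_counter(adev);
 *	// ... workload ...
 *	uint64_t cycles = gfx_v7_0_get_gpu_clock_counter(adev) - t0;
 */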
static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
	gds_size = gds_size >> AMDGPU_GDS_SHIFT;

	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
	gws_size = gws_size >> AMDGPU_GWS_SHIFT;

	oa_base = oa_base >> AMDGPU_OA_SHIFT;
	oa_size = oa_size >> AMDGPU_OA_SHIFT;

	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
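/*
 * The last write programs the OA (ordered append) allocation as a bit
 * mask: (1 << (oa_size + oa_base)) - (1 << oa_base) sets oa_size
 * consecutive bits starting at bit oa_base.  For example, oa_base = 4 and
 * oa_size = 2 gives (1 << 6) - (1 << 4) = 0x30, i.e. OA slots 4 and 5.
 */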
static int gfx_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
	gfx_v7_0_set_ring_funcs(adev);
	gfx_v7_0_set_irq_funcs(adev);
	gfx_v7_0_set_gds_init(adev);

	return 0;
}

static int gfx_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}
static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 7;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAWAII:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 11;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		if ((adev->pdev->device == 0x1304) ||
		    (adev->pdev->device == 0x1305) ||
		    (adev->pdev->device == 0x130C) ||
		    (adev->pdev->device == 0x130F) ||
		    (adev->pdev->device == 0x1310) ||
		    (adev->pdev->device == 0x1311) ||
		    (adev->pdev->device == 0x131C)) {
			adev->gfx.config.max_cu_per_sh = 8;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1309) ||
			   (adev->pdev->device == 0x130A) ||
			   (adev->pdev->device == 0x130D) ||
			   (adev->pdev->device == 0x1313) ||
			   (adev->pdev->device == 0x131D)) {
			adev->gfx.config.max_cu_per_sh = 6;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1306) ||
			   (adev->pdev->device == 0x1307) ||
			   (adev->pdev->device == 0x130B) ||
			   (adev->pdev->device == 0x130E) ||
			   (adev->pdev->device == 0x1315) ||
			   (adev->pdev->device == 0x131B)) {
			adev->gfx.config.max_cu_per_sh = 4;
			adev->gfx.config.max_backends_per_se = 1;
		} else {
			adev->gfx.config.max_cu_per_sh = 3;
			adev->gfx.config.max_backends_per_se = 1;
		}
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}
	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) ||
		    (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) ||
		    (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) ||
		    (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) ||
		    (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) ||
		    (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
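	/*
	 * Worked example of the dGPU row-size formula above: it evaluates
	 * to (4 * 2^(8 + NOOFCOLS)) / 1024 KB, so NOOFCOLS = 0 gives
	 * (4 * 256) / 1024 = 1 KB and NOOFCOLS = 2 gives
	 * (4 * 1024) / 1024 = 4 KB; anything larger is clamped to the
	 * 4 KB maximum by the check immediately above.
	 */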
	/* XXX use MC settings? */
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 2:
		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 4:
		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;
}
static int gfx_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	gfx_v7_0_scratch_init(adev);

	r = gfx_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v7_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate mec buffers */
	r = gfx_v7_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
				     AMDGPU_RING_TYPE_GFX);
		if (r)
			return r;
	}

	/* set up the compute queues */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		unsigned irq_type;

		/* max 32 queues per MEC */
		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
			DRM_ERROR("Too many (%d) compute rings!\n", i);
			break;
		}
		ring = &adev->gfx.compute_ring[i];
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
		ring->me = 1; /* first MEC */
		ring->pipe = i / 8;
		ring->queue = i % 8;
		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, irq_type,
				     AMDGPU_RING_TYPE_COMPUTE);
		if (r)
			return r;
	}

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
			     PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_GDS, 0,
			     NULL, NULL, &adev->gds.gds_gfx_bo);
	if (r)
		return r;

	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
			     PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_GWS, 0,
			     NULL, NULL, &adev->gds.gws_gfx_bo);
	if (r)
		return r;

	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
			     PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_OA, 0,
			     NULL, NULL, &adev->gds.oa_gfx_bo);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v7_0_gpu_early_init(adev);

	return 0;
}
static int gfx_v7_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	gfx_v7_0_cp_compute_fini(adev);
	gfx_v7_0_rlc_fini(adev);
	gfx_v7_0_mec_fini(adev);

	return 0;
}
static int gfx_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v7_0_gpu_init(adev);

	r = gfx_v7_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	gfx_v7_0_cp_enable(adev, false);
	gfx_v7_0_rlc_stop(adev);
	gfx_v7_0_fini_pg(adev);

	return 0;
}

static int gfx_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_fini(adev);
}

static int gfx_v7_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_init(adev);
}
static bool gfx_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
		return false;
	else
		return true;
}

static int gfx_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void gfx_v7_0_print_status(void *handle)
{
	int i, queue;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GFX 7.x registers\n");
	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n", RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n", RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n", RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n", RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n", RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n", RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n", RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n", RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));

	for (i = 0; i < 32; i++) {
		dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
			 i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
	}
	for (i = 0; i < 16; i++) {
		dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
			 i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
	}
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		dev_info(adev->dev, "  se: %d\n", i);
		gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
			 RREG32(mmPA_SC_RASTER_CONFIG));
		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
			 RREG32(mmPA_SC_RASTER_CONFIG_1));
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

	dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n", RREG32(mmGB_ADDR_CONFIG));
	dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n", RREG32(mmHDP_ADDR_CONFIG));
	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC));

	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS));
	dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n", RREG32(mmSX_DEBUG_1));
	dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n", RREG32(mmTA_CNTL_AUX));
	dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n", RREG32(mmSPI_CONFIG_CNTL));
	dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n", RREG32(mmSQ_CONFIG));
	dev_info(adev->dev, "  DB_DEBUG=0x%08X\n", RREG32(mmDB_DEBUG));
	dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n", RREG32(mmDB_DEBUG2));
	dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n", RREG32(mmDB_DEBUG3));
	dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n", RREG32(mmCB_HW_CONTROL));
	dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n", RREG32(mmSPI_CONFIG_CNTL_1));
	dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n", RREG32(mmPA_SC_FIFO_SIZE));
	dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n", RREG32(mmVGT_NUM_INSTANCES));
	dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n", RREG32(mmCP_PERFMON_CNTL));
	dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
	dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n", RREG32(mmVGT_CACHE_INVALIDATION));
	dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n", RREG32(mmVGT_GS_VERTEX_REUSE));
	dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n", RREG32(mmPA_SC_LINE_STIPPLE_STATE));
	dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n", RREG32(mmPA_CL_ENHANCE));
	dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n", RREG32(mmPA_SC_ENHANCE));

	dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n", RREG32(mmCP_ME_CNTL));
	dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n", RREG32(mmCP_MAX_CONTEXT));
	dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n", RREG32(mmCP_ENDIAN_SWAP));
	dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n", RREG32(mmCP_DEVICE_ID));

	dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n", RREG32(mmCP_SEM_WAIT_TIMER));
	if (adev->asic_type != CHIP_HAWAII)
		dev_info(adev->dev, "  CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));

	dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n", RREG32(mmCP_RB_WPTR_DELAY));
	dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n", RREG32(mmCP_RB_VMID));
	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n", RREG32(mmCP_RB0_CNTL));
	dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n", RREG32(mmCP_RB0_WPTR));
	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n", RREG32(mmCP_RB0_RPTR_ADDR));
	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n", RREG32(mmCP_RB0_RPTR_ADDR_HI));
	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n", RREG32(mmCP_RB0_CNTL));
	dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n", RREG32(mmCP_RB0_BASE));
	dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n", RREG32(mmCP_RB0_BASE_HI));
	dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n", RREG32(mmCP_MEC_CNTL));
	dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n", RREG32(mmCP_CPF_DEBUG));

	dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n", RREG32(mmSCRATCH_ADDR));
	dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n", RREG32(mmSCRATCH_UMSK));

	/* init the pipes */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
		int me = (i < 4) ? 1 : 2;
		int pipe = (i < 4) ? i : (i - 4);

		dev_info(adev->dev, "  me: %d, pipe: %d\n", me, pipe);
		cik_srbm_select(adev, me, pipe, 0, 0);
		dev_info(adev->dev, "  CP_HPD_EOP_BASE_ADDR=0x%08X\n", RREG32(mmCP_HPD_EOP_BASE_ADDR));
		dev_info(adev->dev, "  CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n", RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
		dev_info(adev->dev, "  CP_HPD_EOP_VMID=0x%08X\n", RREG32(mmCP_HPD_EOP_VMID));
		dev_info(adev->dev, "  CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL));

		for (queue = 0; queue < 8; queue++) {
			cik_srbm_select(adev, me, pipe, queue, 0);
			dev_info(adev->dev, "  queue: %d\n", queue);
			dev_info(adev->dev, "  CP_PQ_WPTR_POLL_CNTL=0x%08X\n", RREG32(mmCP_PQ_WPTR_POLL_CNTL));
			dev_info(adev->dev, "  CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
			dev_info(adev->dev, "  CP_HQD_ACTIVE=0x%08X\n", RREG32(mmCP_HQD_ACTIVE));
			dev_info(adev->dev, "  CP_HQD_DEQUEUE_REQUEST=0x%08X\n", RREG32(mmCP_HQD_DEQUEUE_REQUEST));
			dev_info(adev->dev, "  CP_HQD_PQ_RPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR));
			dev_info(adev->dev, "  CP_HQD_PQ_WPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR));
			dev_info(adev->dev, "  CP_HQD_PQ_BASE=0x%08X\n", RREG32(mmCP_HQD_PQ_BASE));
			dev_info(adev->dev, "  CP_HQD_PQ_BASE_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_BASE_HI));
			dev_info(adev->dev, "  CP_HQD_PQ_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_CONTROL));
			dev_info(adev->dev, "  CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
			dev_info(adev->dev, "  CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
			dev_info(adev->dev, "  CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
			dev_info(adev->dev, "  CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
			dev_info(adev->dev, "  CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
			dev_info(adev->dev, "  CP_HQD_PQ_WPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR));
			dev_info(adev->dev, "  CP_HQD_VMID=0x%08X\n", RREG32(mmCP_HQD_VMID));
			dev_info(adev->dev, "  CP_MQD_BASE_ADDR=0x%08X\n", RREG32(mmCP_MQD_BASE_ADDR));
			dev_info(adev->dev, "  CP_MQD_BASE_ADDR_HI=0x%08X\n", RREG32(mmCP_MQD_BASE_ADDR_HI));
			dev_info(adev->dev, "  CP_MQD_CONTROL=0x%08X\n", RREG32(mmCP_MQD_CONTROL));
		}
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n", RREG32(mmCP_INT_CNTL_RING0));
	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n", RREG32(mmRLC_LB_CNTL));
	dev_info(adev->dev, "  RLC_CNTL=0x%08X\n", RREG32(mmRLC_CNTL));
	dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n", RREG32(mmRLC_CGCG_CGLS_CTRL));
	dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n", RREG32(mmRLC_LB_CNTR_INIT));
	dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n", RREG32(mmRLC_LB_CNTR_MAX));
	dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n", RREG32(mmRLC_LB_INIT_CU_MASK));
	dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n", RREG32(mmRLC_LB_PARAMS));
	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n", RREG32(mmRLC_LB_CNTL));
	dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n", RREG32(mmRLC_MC_CNTL));
	dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n", RREG32(mmRLC_UCODE_CNTL));

	if (adev->asic_type == CHIP_BONAIRE)
		dev_info(adev->dev, "  RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
			 RREG32(mmRLC_DRIVER_CPDMA_STATUS));

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		dev_info(adev->dev, "  VM %d:\n", i);
		dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n", RREG32(mmSH_MEM_CONFIG));
		dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n", RREG32(mmSH_MEM_APE1_BASE));
		dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n", RREG32(mmSH_MEM_APE1_LIMIT));
		dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n", RREG32(mmSH_MEM_BASES));
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v7_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

	if (grbm_soft_reset || srbm_soft_reset) {
		gfx_v7_0_print_status((void *)adev);
		/* disable CG/PG */
		gfx_v7_0_fini_pg(adev);
		gfx_v7_0_update_cg(adev, false);

		/* stop the rlc */
		gfx_v7_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

		/* Disable MEC parsing/prefetching */
		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

		if (grbm_soft_reset) {
			tmp = RREG32(mmGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);
		}

		if (srbm_soft_reset) {
			tmp = RREG32(mmSRBM_SOFT_RESET);
			tmp |= srbm_soft_reset;
			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~srbm_soft_reset;
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);
		}
		/* Wait a little for things to settle down */
		udelay(50);
		gfx_v7_0_print_status((void *)adev);
	}
	return 0;
}
static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
	 * handles the setting of interrupts for this specific pipe. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id;
	int i;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if ((ring->me == me_id) && (ring->pipe == pipe_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
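/*
 * Worked example of the ring_id decode above: ring_id 0x06 gives
 * me_id = (0x06 & 0x0c) >> 2 = 1 and pipe_id = 0x06 & 0x03 = 2, so the
 * fence is signalled on whichever compute ring was initialised with
 * me == 1 and pipe == 2 in gfx_v7_0_sw_init().
 */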
static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	/* XXX: soft reset the gfx block only */
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (gate) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
static int gfx_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}
const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.early_init = gfx_v7_0_early_init,
	.late_init = gfx_v7_0_late_init,
	.sw_init = gfx_v7_0_sw_init,
	.sw_fini = gfx_v7_0_sw_fini,
	.hw_init = gfx_v7_0_hw_init,
	.hw_fini = gfx_v7_0_hw_fini,
	.suspend = gfx_v7_0_suspend,
	.resume = gfx_v7_0_resume,
	.is_idle = gfx_v7_0_is_idle,
	.wait_for_idle = gfx_v7_0_wait_for_idle,
	.soft_reset = gfx_v7_0_soft_reset,
	.print_status = gfx_v7_0_print_status,
	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
	.set_powergating_state = gfx_v7_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.parse_cs = NULL,
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.get_rptr = gfx_v7_0_ring_get_rptr_compute,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.parse_cs = NULL,
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
	.set = gfx_v7_0_set_eop_interrupt_state,
	.process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
	.set = gfx_v7_0_set_priv_reg_fault_state,
	.process = gfx_v7_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
	.set = gfx_v7_0_set_priv_inst_fault_state,
	.process = gfx_v7_0_priv_inst_irq,
};

static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
			 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;

	if (!adev || !cu_info)
		return -EINVAL;

	memset(cu_info, 0, sizeof(*cu_info));

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v7_0_select_se_sh(adev, i, j);
			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < 16; k++) {
				if (bitmap & mask) {
					if (counter < 2)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask