/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
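/*
 * Illustrative use (a sketch, not part of this file): the get-param ioctl
 * handler in etnaviv_drv.c calls into the function above with a parameter
 * number supplied by userspace, e.g.:
 *
 *	u64 model;
 *
 *	if (etnaviv_gpu_get_param(gpu, ETNAVIV_PARAM_GPU_MODEL, &model) == 0)
 *		... report GC%llx back to userspace ...
 */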
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
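/*
 * Worked example of etnaviv_field(): assuming a field defined in
 * state_hi.xml.h with __MASK 0x0000000f and __SHIFT 0 (values here are
 * illustrative), etnaviv_field(0x00000403, VIVS_HI_CHIP_SPECS_STREAM_COUNT)
 * masks the register value down to 0x3 and shifts it to bit 0, i.e. a raw
 * stream count of 3.
 */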
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}
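/*
 * Note on the conversions above: several CHIP_SPECS fields are encoded as
 * log2 of the real value, so e.g. a raw register_max of 6 becomes
 * 1 << 6 = 64 registers. The fallback constants fill in known values for
 * cores that predate (or misreport) the CHIP_SPECS registers.
 */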
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
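/*
 * The clock divider is latched with a two-write sequence: the first write
 * presents the new FSCALE value with the CMD_LOAD bit set, the second
 * clears CMD_LOAD while keeping the value, which loads it into the
 * divider.
 */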
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420)
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
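/*
 * Example call from etnaviv_gpu_hw_init() below, which points the FE at
 * the GPU virtual address of the ring buffer:
 *
 *	prefetch = etnaviv_buffer_init(gpu);
 *	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
 *			     prefetch);
 *
 * (That prefetch counts 64-bit FE words is an assumption based on the
 * Vivante command stream format; see cmdstream.xml.h.)
 */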
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
			     prefetch);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto destroy_iommu;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif
/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		dma_fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
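/*
 * Progress heuristic, by example: if the FE DMA address only advanced from
 * 0x1000 to 0x1008 since the last tick (a change of 8, within one 16-byte
 * fetch window), the FE may simply be spinning in its WAIT/LINK loop, so
 * this is not counted as progress; a larger or negative jump is.
 */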
static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}
/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};
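/*
 * Fences are freed via kfree_rcu() because dma_fence pointers may still be
 * dereferenced under RCU (e.g. through reservation object lookups), so the
 * memory must remain valid for a grace period after release.
 */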
static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * event management:
 */
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}
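/*
 * The event table behaves like a counting semaphore built from a
 * completion: etnaviv_gpu_init() calls complete() once per slot,
 * event_alloc() consumes one count before claiming a slot under the
 * spinlock, and event_free() returns it. A submit therefore blocks (up to
 * the timeout) when all hardware event IDs are in flight.
 */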
/*
 * Cmdstream submission/retirement:
 */
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!dma_fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		dma_fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission.  Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed submission.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
				fence, gpu->retired_fence,
				gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}
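/*
 * Reminder on the return convention used above:
 * wait_event_interruptible_timeout() returns 0 on timeout, -ERESTARTSYS
 * when interrupted by a signal, and the remaining jiffies otherwise, which
 * is why only the remaining-jiffies case is mapped back to 0.
 */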
/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct dma_fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_pm_put;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_pm_put;
	}

	mutex_lock(&gpu->lock);

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

	mutex_unlock(&gpu->lock);

out_pm_put:
	etnaviv_gpu_pm_put(gpu);

	return ret;
}
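/*
 * Lifetime of one submit, for orientation: a runtime PM reference and an
 * event slot are taken, a fence is allocated and queued with the command
 * buffer, and every BO mapping gains a reference. These are dropped again
 * by the IRQ handler and retire worker on completion, or by the recover
 * worker if the hangcheck timer fires without progress.
 */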
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			int i;

			dev_err_ratelimited(gpu->dev,
					    "MMU fault status 0x%08x\n",
					    gpu_read(gpu, VIVS_MMUv2_STATUS));
			for (i = 0; i < 4; i++) {
				dev_err_ratelimited(gpu->dev,
					"MMU %d fault addr 0x%08x\n",
					i, gpu_read(gpu,
					VIVS_MMUv2_EXCEPTION_ADDR(i)));
			}
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			dma_fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			return ret;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return ret;
}
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}
*gpu
)
1512 /* Replace the last WAIT with END */
1513 etnaviv_buffer_end(gpu
);
1516 * We know that only the FE is busy here, this should
1517 * happen quickly (as the WAIT is only 200 cycles). If
1518 * we fail, just warn and continue.
1520 etnaviv_gpu_wait_idle(gpu
, 100);
1523 return etnaviv_gpu_clk_disable(gpu
);
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
			       (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}
= {
1622 .bind
= etnaviv_gpu_bind
,
1623 .unbind
= etnaviv_gpu_unbind
,
1626 static const struct of_device_id etnaviv_gpu_match
[] = {
1628 .compatible
= "vivante,gc"
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif
= {
1751 SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend
, etnaviv_gpu_rpm_resume
,
1755 struct platform_driver etnaviv_gpu_driver
= {
1757 .name
= "etnaviv-gpu",
1758 .owner
= THIS_MODULE
,
1759 .pm
= &etnaviv_gpu_pm_ops
,
1760 .of_match_table
= etnaviv_gpu_match
,
1762 .probe
= etnaviv_gpu_platform_probe
,
1763 .remove
= etnaviv_gpu_platform_remove
,
1764 .id_table
= gpu_ids
,