/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>
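/* Decode of the per-engine status register (0x002640 + engn * 0x08).
 * The bitfields read below identify whether the engine is busy or
 * faulted and, mid-channel-switch, which channel/TSG is being saved
 * out (prev) and loaded in (next); the function resolves these into
 * the single status->chan pointer that the recovery paths key off.
 */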
void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}
int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	} else
	if (oclass->engn == &fifo->func->user) {
		const struct gk104_fifo_user_user *user = oclass->engn;
		return user->ctor(oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}
int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->user.ctor && c++ == index) {
		oclass->base =  fifo->func->user.user;
		oclass->engn = &fifo->func->user;
		return 0;
	}

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base =  fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
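/* Hand a runlist to the scheduler: 0x002270 takes the runlist base
 * (address >> 12, with the memory target in the top bits), 0x002274
 * takes the runlist index and entry count, and the pending bit in
 * 0x002284 + (runl * 0x08) is polled until the fetch completes.
 */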
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
}
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&fifo->base.mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.mutex);
}
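/* A gk104 runlist entry is two 32-bit words: the channel ID, then a
 * second word that stays zero for bare channels. Chips with channel
 * groups install a .cgrp hook alongside .chan to emit TSG header
 * entries as well.
 */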
void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}
const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};
void
gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
}
int
gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	return hweight32(nvkm_rd32(device, 0x000204));
}
const struct gk104_fifo_pbdma_func
gk104_fifo_pbdma = {
	.nr = gk104_fifo_pbdma_nr,
	.init = gk104_fifo_pbdma_init,
};
struct nvkm_engine *
gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
{
	if (engi == GK104_FIFO_ENGN_SW)
		return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0);

	return gk104_fifo(base)->engine[engi].engine;
}
int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int engn;

	if (engine->subdev.type == NVKM_ENGINE_SW)
		return GK104_FIFO_ENGN_SW;

	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine)
			return engn;
	}

	WARN_ON(1);
	return -1;
}
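/* Recovery flow: the fault/interrupt paths below mark engines and
 * runlists for recovery under fifo->base.lock and schedule this work
 * item, which performs the actual engine reset and runlist resubmission
 * outside interrupt context.
 */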
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}
static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}
static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}
static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_fifo_kevent(&fifo->base, chid);
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}
static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.type &&
				    en->inst == engine->subdev.inst) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger an MMU fault for the engine.
	 *
	 * It's not exactly clear why this is needed, but nvgpu does something
	 * similar, and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}
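/* MMU fault handler: decode the fault into human-readable enums,
 * recover BAR/IFB units inline where a simple reset suffices, then kill
 * the offending channel and schedule recovery for any engine involved.
 */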
static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *en = "";
	char ct[8] = "HUB/";

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_bar_bar1_reset(device);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_bar_bar2_reset(device);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2, 0);
			break;
		}
	}

	if (ee == NULL) {
		struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
		if (subdev) {
			if (subdev->func == &nvkm_engine)
				engine = container_of(subdev, typeof(*engine), subdev);
			en = engine->subdev.name;
		}
	} else {
		en = ee->name;
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	if (engine) {
		int engn = fifo->base.func->engine_id(&fifo->base, engine);
		if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
			gk104_fifo_recover_engn(fifo, engn);
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}
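/* Top-level interrupt dispatch. Each recognised bit in 0x002100 is
 * handled, ACKed, and cleared from the local copy; whatever remains is
 * reported once and masked off so an unknown source can't cause an
 * interrupt storm.
 */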
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			fifo->func->intr.fault(&fifo->base, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
static int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_HOST_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_HOST_RUNLIST_ENGINES: {
		if (*data < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[*data].engm;
			struct nvkm_engine *engine;
			int engn;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine)) {
#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
					switch (engine->subdev.type) {
					CASE(SW    );
					CASE(GR    );
					CASE(MPEG  );
					CASE(ME    );
					CASE(CIPHER);
					CASE(BSP   );
					CASE(VP    );
					CASE(CE    );
					CASE(SEC   );
					CASE(MSVLD );
					CASE(MSPDEC);
					CASE(MSPPP );
					CASE(MSENC );
					CASE(VIC   );
					CASE(SEC2  );
					CASE(NVDEC );
					CASE(NVENC );
					default:
						WARN_ON(1);
						break;
					}
#undef CASE
				}
			}
			return 0;
		}
		return -EINVAL;
	}
	default:
		return -EINVAL;
	}
}
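/* One-time setup: probe the PBDMA count, read the PBDMA->runlist
 * mapping and the topology device list to learn which engine sits on
 * which runlist, then allocate the double-buffered runlist memory and
 * the per-channel user area (USERD) that gets mapped through BAR1.
 */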
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	struct nvkm_top_device *tdev;
	int pbid, ret, i, j;
	u32 *map;

	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	list_for_each_entry(tdev, &device->top->device, head) {
		const int engn = tdev->engine;
		char _en[16], *en;

		if (engn < 0)
			continue;

		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & BIT(tdev->runlist)) {
				pbid = j;
				break;
			}
		}

		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
		if (!fifo->engine[engn].engine) {
			snprintf(_en, sizeof(_en), "%s, %d",
				 nvkm_subdev_type[tdev->type], tdev->inst);
			en = _en;
		} else {
			en = fifo->engine[engn].engine->subdev.name;
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   tdev->engine, tdev->runlist, pbid, en);

		fifo->engine[engn].runl = tdev->runlist;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[tdev->runlist].engm |= BIT(engn);
		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
		if (tdev->type == NVKM_ENGINE_GR)
			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	fifo->func->pbdma->init(fifo);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->pbdma->init_timeout)
		fifo->func->pbdma->init_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}
static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}
static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.engine_id = gk104_fifo_engine_id,
	.id_engine = gk104_fifo_id_engine,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};
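/* Shared constructor: later Kepler/Maxwell/Pascal parts call this with
 * their own gk104_fifo_func, so only the function table differs between
 * chipsets.
 */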
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
}
const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};
const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
	{}
};
const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};
const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};
const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};
static const struct gk104_fifo_func
gk104_fifo = {
	.intr.fault = gf100_fifo_intr_fault,
	.pbdma = &gk104_fifo_pbdma,
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
};
int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
}