/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "changk104.h"
#include "cgrp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>

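/* Decoded snapshot of a PFIFO engine status register (0x002640 + engn * 0x08).
 * The field meanings mirror the decode in gk104_fifo_engine_status() below:
 * busy/fault flags, channel-switch state, and the previous/next channel (or
 * TSG) IDs.  After decoding, "chan" points at whichever of prev/next the
 * engine is considered to be running, or is NULL if no channel is resident.
 */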
struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

static int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	} else
	if (oclass->engn == &fifo->func->user) {
		const struct gk104_fifo_user_user *user = oclass->engn;
		return user->ctor(oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}

static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->user.ctor && c++ == index) {
		oclass->base =  fifo->func->user.user;
		oclass->engn = &fifo->func->user;
		return 0;
	}

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base =  fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}

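/* Bit 31 of the interrupt enable mask (0x002140) gates the engine (uevent)
 * interrupt that gk104_fifo_intr() forwards to nvkm_fifo_uevent().
 */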
static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

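/* Submit a runlist to the hardware: the runlist base and target aperture
 * are written to 0x002270, and writing (runl << 20) | nr to 0x002274 kicks
 * off processing.  Bit 20 of 0x002284 + (runl * 0x08) is then polled until
 * it clears (with a 2s timeout), which the code below treats as completion.
 */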
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
}

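/* Runlists are double-buffered (mem[0]/mem[1], toggled via .next), so a new
 * list can be built without disturbing the buffer the hardware may still be
 * reading.  Bare channels are written first, then each channel group (TSG)
 * header followed by its member channels.
 */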
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

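/* A gk104 runlist entry is two 32-bit words, the channel (or TSG) ID and a
 * zero pad word, hence the 8-byte entry size in the descriptor below.
 */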
const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

void
gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
}

int
gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	return hweight32(nvkm_rd32(device, 0x000204));
}

const struct gk104_fifo_pbdma_func
gk104_fifo_pbdma = {
	.nr = gk104_fifo_pbdma_nr,
	.init = gk104_fifo_pbdma_init,
};

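/* Recovery happens in two stages: recover_chan()/recover_engn() run under
 * the fifo lock, mark the affected engines/runlists in fifo->recover and
 * block the runlists, then defer the heavy lifting to this work item, which
 * resets the engines, resubmits the runlists and unblocks them.
 */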
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_fifo_kevent(&fifo->base, chid);
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * It's not clear why this is needed, but nvgpu does something
	 * similar, and it makes recovery from CTXSW_TIMEOUT a lot more
	 * reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

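/* MMU fault handler.  Decodes the fault into human-readable form using the
 * per-chipset enum tables, resets BAR1/BAR2 if one of those was the faulting
 * unit, then kills the offending channel and schedules engine recovery.
 */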
static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_bar_bar1_reset(device);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_bar_bar2_reset(device);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2);
			break;
		}
	}

	if (ee == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while (*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", ee->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

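/* PBDMA interrupt handler.  DEVICE interrupts (bit 23) may be software
 * methods; these are forwarded to the SW engine and, if handled there,
 * suppressed from the error message below.
 */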
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

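/* Top-level PFIFO interrupt dispatch.  Each handled bit of INTR (0x002100)
 * is ACKed and cleared from the local status word; anything left over at
 * the end is reported and masked off so it can't wedge the system.
 */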
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			fifo->func->intr.fault(&fifo->base, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* Allow MMU fault interrupts, even when we're not using the fifo. */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

static int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_FIFO_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0) ...
	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
		if (runl < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[runl].engm;
			struct nvkm_engine *engine;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine))
					*data |= BIT_ULL(engine->subdev.index);
			}
			return 0;
		}
		return -EINVAL;
	}
	default:
		return -EINVAL;
	}
}

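/* One-time setup: query the PBDMA count, read the PBDMA->runlist mapping
 * from 0x002390+, walk the topology info to populate fifo->engine[] and
 * fifo->runlist[], and allocate the double-buffered runlist memory plus the
 * per-channel user memory that gets mapped through BAR1.
 */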
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	fifo->func->pbdma->init(fifo);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->pbdma->init_timeout)
		fifo->func->pbdma->init_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x13, "RASTERTWOD" },
	{ 0x1d, "DFALCON" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.intr.fault = gf100_fifo_intr_fault,
	.pbdma = &gk104_fifo_pbdma,
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}