]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drm/nouveau/nvif: assign internal class identifiers to sw classes
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / nouveau / nvkm / engine / fifo / gk104.c
CommitLineData
5132f377 1/*
ebb945a9 2 * Copyright 2012 Red Hat Inc.
5132f377
BS
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
05c7145d 24#include "gk104.h"
5132f377 25
ebb945a9 26#include <core/client.h>
ebb945a9 27#include <core/engctx.h>
ebb945a9 28#include <core/enum.h>
05c7145d 29#include <core/handle.h>
ebb945a9 30#include <subdev/bar.h>
52225551 31#include <subdev/fb.h>
5ce3bf3c 32#include <subdev/mmu.h>
05c7145d 33#include <subdev/timer.h>
5132f377 34
05c7145d 35#include <nvif/class.h>
f58ddf95 36#include <nvif/ioctl.h>
05c7145d 37#include <nvif/unpack.h>
5132f377 38
507ceb15 39#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
dbff2dee 40static const struct {
507ceb15
MP
41 u64 subdev;
42 u64 mask;
dbff2dee 43} fifo_engine[] = {
48506d17 44 _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
aedf24ff 45 (1ULL << NVDEV_ENGINE_CE2)),
37a5d028 46 _(NVDEV_ENGINE_MSPDEC , 0),
fd8666f7 47 _(NVDEV_ENGINE_MSPPP , 0),
eccf7e8a 48 _(NVDEV_ENGINE_MSVLD , 0),
aedf24ff
BS
49 _(NVDEV_ENGINE_CE0 , 0),
50 _(NVDEV_ENGINE_CE1 , 0),
bd8369ec 51 _(NVDEV_ENGINE_MSENC , 0),
dbff2dee
BS
52};
53#undef _
54#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
55
05c7145d
BS
56struct gk104_fifo_engn {
57 struct nvkm_gpuobj *runlist[2];
f82c44a7 58 int cur_runlist;
138b873f 59 wait_queue_head_t wait;
5132f377
BS
60};
61
6189f1b0 62struct gk104_fifo {
05c7145d 63 struct nvkm_fifo base;
98d1e317
BS
64
65 struct work_struct fault;
66 u64 mask;
67
05c7145d 68 struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
5132f377 69 struct {
05c7145d
BS
70 struct nvkm_gpuobj *mem;
71 struct nvkm_vma bar;
5132f377
BS
72 } user;
73 int spoon_nr;
74};
75
05c7145d
BS
76struct gk104_fifo_base {
77 struct nvkm_fifo_base base;
78 struct nvkm_gpuobj *pgd;
79 struct nvkm_vm *vm;
ebb945a9
BS
80};
81
05c7145d
BS
82struct gk104_fifo_chan {
83 struct nvkm_fifo_chan base;
5132f377 84 u32 engine;
87032e11
BS
85 enum {
86 STOPPED,
87 RUNNING,
88 KILLED
89 } state;
5132f377
BS
90};
91
ebb945a9
BS
92/*******************************************************************************
93 * FIFO channel objects
94 ******************************************************************************/
95
5132f377 96static void
6189f1b0 97gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
5132f377 98{
6189f1b0 99 struct gk104_fifo_engn *engn = &fifo->engine[engine];
e5c5e4f5
BS
100 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
101 struct nvkm_device *device = subdev->device;
87744403 102 struct nvkm_bar *bar = device->bar;
05c7145d 103 struct nvkm_gpuobj *cur;
ebb945a9 104 int i, p;
5132f377 105
6189f1b0 106 mutex_lock(&nv_subdev(fifo)->mutex);
f82c44a7
BS
107 cur = engn->runlist[engn->cur_runlist];
108 engn->cur_runlist = !engn->cur_runlist;
5132f377 109
5444e770 110 nvkm_kmap(cur);
6189f1b0
BS
111 for (i = 0, p = 0; i < fifo->base.max; i++) {
112 struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
87032e11 113 if (chan && chan->state == RUNNING && chan->engine == engine) {
5444e770
BS
114 nvkm_wo32(cur, p + 0, i);
115 nvkm_wo32(cur, p + 4, 0x00000000);
87032e11
BS
116 p += 8;
117 }
5132f377 118 }
ebb945a9 119 bar->flush(bar);
5444e770 120 nvkm_done(cur);
5132f377 121
87744403
BS
122 nvkm_wr32(device, 0x002270, cur->addr >> 12);
123 nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));
87032e11 124
87744403 125 if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
5c0633e6
BS
126 (engine * 0x08)) & 0x00100000),
127 msecs_to_jiffies(2000)) == 0)
e5c5e4f5 128 nvkm_error(subdev, "runlist %d update timeout\n", engine);
6189f1b0 129 mutex_unlock(&nv_subdev(fifo)->mutex);
5132f377
BS
130}
131
c420b2dc 132static int
05c7145d
BS
133gk104_fifo_context_attach(struct nvkm_object *parent,
134 struct nvkm_object *object)
5132f377 135{
05c7145d
BS
136 struct nvkm_bar *bar = nvkm_bar(parent);
137 struct gk104_fifo_base *base = (void *)parent->parent;
5444e770 138 struct nvkm_gpuobj *engn = &base->base.gpuobj;
05c7145d 139 struct nvkm_engctx *ectx = (void *)object;
ebb945a9
BS
140 u32 addr;
141 int ret;
142
143 switch (nv_engidx(object->engine)) {
01672ef4 144 case NVDEV_ENGINE_SW :
448a4532 145 return 0;
aedf24ff
BS
146 case NVDEV_ENGINE_CE0:
147 case NVDEV_ENGINE_CE1:
148 case NVDEV_ENGINE_CE2:
448a4532 149 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
01672ef4 150 return 0;
37a5d028
BS
151 case NVDEV_ENGINE_GR : addr = 0x0210; break;
152 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
153 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
154 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
ebb945a9
BS
155 default:
156 return -EINVAL;
5132f377
BS
157 }
158
ebb945a9 159 if (!ectx->vma.node) {
05c7145d
BS
160 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
161 NV_MEM_ACCESS_RW, &ectx->vma);
ebb945a9
BS
162 if (ret)
163 return ret;
4c2d4222
BS
164
165 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
ebb945a9
BS
166 }
167
5444e770
BS
168 nvkm_kmap(engn);
169 nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
170 nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
ebb945a9 171 bar->flush(bar);
5444e770 172 nvkm_done(engn);
ebb945a9 173 return 0;
5132f377
BS
174}
175
6189f1b0
BS
176static int
177gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
178{
179 struct nvkm_object *obj = (void *)chan;
180 struct gk104_fifo *fifo = (void *)obj->engine;
e5c5e4f5
BS
181 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
182 struct nvkm_device *device = subdev->device;
6189f1b0 183
87744403 184 nvkm_wr32(device, 0x002634, chan->base.chid);
af3082b3
BS
185 if (nvkm_msec(device, 2000,
186 if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
187 break;
188 ) < 0) {
e5c5e4f5
BS
189 nvkm_error(subdev, "channel %d [%s] kick timeout\n",
190 chan->base.chid, nvkm_client_name(chan));
6189f1b0
BS
191 return -EBUSY;
192 }
193
194 return 0;
195}
196
ebb945a9 197static int
05c7145d
BS
198gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
199 struct nvkm_object *object)
5132f377 200{
05c7145d 201 struct nvkm_bar *bar = nvkm_bar(parent);
05c7145d
BS
202 struct gk104_fifo_base *base = (void *)parent->parent;
203 struct gk104_fifo_chan *chan = (void *)parent;
5444e770 204 struct nvkm_gpuobj *engn = &base->base.gpuobj;
ebb945a9 205 u32 addr;
6189f1b0 206 int ret;
ebb945a9
BS
207
208 switch (nv_engidx(object->engine)) {
37a5d028
BS
209 case NVDEV_ENGINE_SW : return 0;
210 case NVDEV_ENGINE_CE0 :
211 case NVDEV_ENGINE_CE1 :
212 case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
213 case NVDEV_ENGINE_GR : addr = 0x0210; break;
214 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
215 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
216 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
ebb945a9
BS
217 default:
218 return -EINVAL;
219 }
220
6189f1b0
BS
221 ret = gk104_fifo_chan_kick(chan);
222 if (ret && suspend)
223 return ret;
5132f377 224
01672ef4 225 if (addr) {
5444e770
BS
226 nvkm_kmap(engn);
227 nvkm_wo32(engn, addr + 0x00, 0x00000000);
228 nvkm_wo32(engn, addr + 0x04, 0x00000000);
01672ef4 229 bar->flush(bar);
5444e770 230 nvkm_done(engn);
01672ef4
BS
231 }
232
ebb945a9 233 return 0;
5132f377
BS
234}
235
236static int
05c7145d
BS
237gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
238 struct nvkm_oclass *oclass, void *data, u32 size,
239 struct nvkm_object **pobject)
5132f377 240{
bbf8906b
BS
241 union {
242 struct kepler_channel_gpfifo_a_v0 v0;
243 } *args = data;
05c7145d 244 struct nvkm_bar *bar = nvkm_bar(parent);
6189f1b0 245 struct gk104_fifo *fifo = (void *)engine;
05c7145d
BS
246 struct gk104_fifo_base *base = (void *)parent;
247 struct gk104_fifo_chan *chan;
e5c5e4f5 248 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
5444e770 249 struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
ebb945a9
BS
250 u64 usermem, ioffset, ilength;
251 int ret, i;
252
53003941 253 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
bbf8906b 254 if (nvif_unpack(args->v0, 0, 0, false)) {
bf81df9b 255 nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
53003941
BS
256 "ioffset %016llx ilength %08x engine %08x\n",
257 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
258 args->v0.ilength, args->v0.engine);
bbf8906b
BS
259 } else
260 return ret;
ebb945a9 261
dbff2dee 262 for (i = 0; i < FIFO_ENGINE_NR; i++) {
bbf8906b 263 if (args->v0.engine & (1 << i)) {
05c7145d 264 if (nvkm_engine(parent, fifo_engine[i].subdev)) {
bbf8906b 265 args->v0.engine = (1 << i);
dbff2dee
BS
266 break;
267 }
268 }
269 }
270
56fbd2b6 271 if (i == FIFO_ENGINE_NR) {
e5c5e4f5
BS
272 nvkm_error(subdev, "unsupported engines %08x\n",
273 args->v0.engine);
dbff2dee 274 return -ENODEV;
56fbd2b6 275 }
dbff2dee 276
05c7145d 277 ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
6189f1b0 278 fifo->user.bar.offset, 0x200,
05c7145d
BS
279 args->v0.pushbuf,
280 fifo_engine[i].mask, &chan);
ebb945a9
BS
281 *pobject = nv_object(chan);
282 if (ret)
283 return ret;
284
bbf8906b
BS
285 args->v0.chid = chan->base.chid;
286
05c7145d
BS
287 nv_parent(chan)->context_attach = gk104_fifo_context_attach;
288 nv_parent(chan)->context_detach = gk104_fifo_context_detach;
dbff2dee 289 chan->engine = i;
ebb945a9
BS
290
291 usermem = chan->base.chid * 0x200;
bbf8906b
BS
292 ioffset = args->v0.ioffset;
293 ilength = order_base_2(args->v0.ilength / 8);
ebb945a9 294
5444e770 295 nvkm_kmap(fifo->user.mem);
ebb945a9 296 for (i = 0; i < 0x200; i += 4)
5444e770
BS
297 nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
298 nvkm_done(fifo->user.mem);
299
300 nvkm_kmap(ramfc);
301 nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
302 nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
303 nvkm_wo32(ramfc, 0x10, 0x0000face);
304 nvkm_wo32(ramfc, 0x30, 0xfffff902);
305 nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
306 nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
307 nvkm_wo32(ramfc, 0x84, 0x20400000);
308 nvkm_wo32(ramfc, 0x94, 0x30000001);
309 nvkm_wo32(ramfc, 0x9c, 0x00000100);
310 nvkm_wo32(ramfc, 0xac, 0x0000001f);
311 nvkm_wo32(ramfc, 0xe8, chan->base.chid);
312 nvkm_wo32(ramfc, 0xb8, 0xf8000000);
313 nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
314 nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
ebb945a9 315 bar->flush(bar);
5444e770 316 nvkm_done(ramfc);
ebb945a9
BS
317 return 0;
318}
5132f377 319
ebb945a9 320static int
05c7145d 321gk104_fifo_chan_init(struct nvkm_object *object)
ebb945a9 322{
05c7145d 323 struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
6189f1b0 324 struct gk104_fifo *fifo = (void *)object->engine;
05c7145d 325 struct gk104_fifo_chan *chan = (void *)object;
87744403 326 struct nvkm_device *device = fifo->base.engine.subdev.device;
ebb945a9
BS
327 u32 chid = chan->base.chid;
328 int ret;
5132f377 329
05c7145d 330 ret = nvkm_fifo_channel_init(&chan->base);
ebb945a9
BS
331 if (ret)
332 return ret;
5132f377 333
87744403
BS
334 nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
335 nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
87032e11
BS
336
337 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
87744403 338 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
6189f1b0 339 gk104_fifo_runlist_update(fifo, chan->engine);
87744403 340 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
87032e11
BS
341 }
342
ebb945a9
BS
343 return 0;
344}
5132f377 345
ebb945a9 346static int
05c7145d 347gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
ebb945a9 348{
6189f1b0 349 struct gk104_fifo *fifo = (void *)object->engine;
05c7145d 350 struct gk104_fifo_chan *chan = (void *)object;
87744403 351 struct nvkm_device *device = fifo->base.engine.subdev.device;
ebb945a9 352 u32 chid = chan->base.chid;
5132f377 353
87032e11 354 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
87744403 355 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
6189f1b0 356 gk104_fifo_runlist_update(fifo, chan->engine);
87032e11 357 }
5132f377 358
87744403 359 nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
05c7145d 360 return nvkm_fifo_channel_fini(&chan->base, suspend);
ebb945a9 361}
5132f377 362
89025bd4
BS
363struct nvkm_ofuncs
364gk104_fifo_chan_ofuncs = {
05c7145d
BS
365 .ctor = gk104_fifo_chan_ctor,
366 .dtor = _nvkm_fifo_channel_dtor,
367 .init = gk104_fifo_chan_init,
368 .fini = gk104_fifo_chan_fini,
369 .map = _nvkm_fifo_channel_map,
370 .rd32 = _nvkm_fifo_channel_rd32,
371 .wr32 = _nvkm_fifo_channel_wr32,
372 .ntfy = _nvkm_fifo_channel_ntfy
ebb945a9 373};
5132f377 374
05c7145d
BS
375static struct nvkm_oclass
376gk104_fifo_sclass[] = {
89025bd4 377 { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
ebb945a9
BS
378 {}
379};
380
381/*******************************************************************************
382 * FIFO context - instmem heap and vm setup
383 ******************************************************************************/
5132f377 384
c420b2dc 385static int
05c7145d
BS
386gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
387 struct nvkm_oclass *oclass, void *data, u32 size,
388 struct nvkm_object **pobject)
c420b2dc 389{
05c7145d 390 struct gk104_fifo_base *base;
ebb945a9 391 int ret;
c420b2dc 392
05c7145d
BS
393 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
394 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
ebb945a9
BS
395 *pobject = nv_object(base);
396 if (ret)
397 return ret;
c420b2dc 398
05c7145d
BS
399 ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
400 &base->pgd);
ebb945a9
BS
401 if (ret)
402 return ret;
403
5444e770
BS
404 nvkm_kmap(&base->base.gpuobj);
405 nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
406 nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
407 nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
408 nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
409 nvkm_done(&base->base.gpuobj);
ebb945a9 410
05c7145d 411 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
ebb945a9
BS
412 if (ret)
413 return ret;
c420b2dc 414
c420b2dc
BS
415 return 0;
416}
417
ebb945a9 418static void
05c7145d 419gk104_fifo_context_dtor(struct nvkm_object *object)
ebb945a9 420{
05c7145d
BS
421 struct gk104_fifo_base *base = (void *)object;
422 nvkm_vm_ref(NULL, &base->vm, base->pgd);
423 nvkm_gpuobj_ref(NULL, &base->pgd);
424 nvkm_fifo_context_destroy(&base->base);
ebb945a9
BS
425}
426
05c7145d
BS
427static struct nvkm_oclass
428gk104_fifo_cclass = {
ebb945a9 429 .handle = NV_ENGCTX(FIFO, 0xe0),
05c7145d
BS
430 .ofuncs = &(struct nvkm_ofuncs) {
431 .ctor = gk104_fifo_context_ctor,
432 .dtor = gk104_fifo_context_dtor,
433 .init = _nvkm_fifo_context_init,
434 .fini = _nvkm_fifo_context_fini,
435 .rd32 = _nvkm_fifo_context_rd32,
436 .wr32 = _nvkm_fifo_context_wr32,
ebb945a9
BS
437 },
438};
439
440/*******************************************************************************
441 * PFIFO engine
442 ******************************************************************************/
443
98d1e317 444static inline int
6189f1b0 445gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
98d1e317
BS
446{
447 switch (engn) {
37a5d028
BS
448 case NVDEV_ENGINE_GR :
449 case NVDEV_ENGINE_CE2 : engn = 0; break;
450 case NVDEV_ENGINE_MSVLD : engn = 1; break;
451 case NVDEV_ENGINE_MSPPP : engn = 2; break;
452 case NVDEV_ENGINE_MSPDEC: engn = 3; break;
453 case NVDEV_ENGINE_CE0 : engn = 4; break;
454 case NVDEV_ENGINE_CE1 : engn = 5; break;
455 case NVDEV_ENGINE_MSENC : engn = 6; break;
98d1e317
BS
456 default:
457 return -1;
458 }
459
460 return engn;
461}
462
05c7145d 463static inline struct nvkm_engine *
6189f1b0 464gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
129dcca7
BS
465{
466 if (engn >= ARRAY_SIZE(fifo_engine))
467 return NULL;
6189f1b0 468 return nvkm_engine(fifo, fifo_engine[engn].subdev);
129dcca7
BS
469}
470
98d1e317 471static void
05c7145d 472gk104_fifo_recover_work(struct work_struct *work)
98d1e317 473{
6189f1b0 474 struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
87744403 475 struct nvkm_device *device = fifo->base.engine.subdev.device;
05c7145d 476 struct nvkm_object *engine;
98d1e317
BS
477 unsigned long flags;
478 u32 engn, engm = 0;
479 u64 mask, todo;
480
6189f1b0
BS
481 spin_lock_irqsave(&fifo->base.lock, flags);
482 mask = fifo->mask;
483 fifo->mask = 0ULL;
484 spin_unlock_irqrestore(&fifo->base.lock, flags);
98d1e317
BS
485
486 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
6189f1b0 487 engm |= 1 << gk104_fifo_engidx(fifo, engn);
87744403 488 nvkm_mask(device, 0x002630, engm, engm);
98d1e317
BS
489
490 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
6189f1b0 491 if ((engine = (void *)nvkm_engine(fifo, engn))) {
98d1e317
BS
492 nv_ofuncs(engine)->fini(engine, false);
493 WARN_ON(nv_ofuncs(engine)->init(engine));
494 }
6189f1b0 495 gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
98d1e317
BS
496 }
497
87744403
BS
498 nvkm_wr32(device, 0x00262c, engm);
499 nvkm_mask(device, 0x002630, engm, 0x00000000);
98d1e317
BS
500}
501
502static void
6189f1b0 503gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
05c7145d 504 struct gk104_fifo_chan *chan)
98d1e317 505{
e5c5e4f5
BS
506 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
507 struct nvkm_device *device = subdev->device;
98d1e317
BS
508 u32 chid = chan->base.chid;
509 unsigned long flags;
510
e5c5e4f5
BS
511 nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
512 nv_subdev(engine)->name, chid);
98d1e317 513
87744403 514 nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
98d1e317
BS
515 chan->state = KILLED;
516
6189f1b0
BS
517 spin_lock_irqsave(&fifo->base.lock, flags);
518 fifo->mask |= 1ULL << nv_engidx(engine);
519 spin_unlock_irqrestore(&fifo->base.lock, flags);
520 schedule_work(&fifo->fault);
98d1e317
BS
521}
522
3d61b967 523static int
6189f1b0 524gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
3d61b967 525{
05c7145d
BS
526 struct gk104_fifo_chan *chan = NULL;
527 struct nvkm_handle *bind;
3d61b967
BS
528 unsigned long flags;
529 int ret = -EINVAL;
530
6189f1b0
BS
531 spin_lock_irqsave(&fifo->base.lock, flags);
532 if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
533 chan = (void *)fifo->base.channel[chid];
3d61b967
BS
534 if (unlikely(!chan))
535 goto out;
536
f58ddf95 537 bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
3d61b967
BS
538 if (likely(bind)) {
539 if (!mthd || !nv_call(bind->object, mthd, data))
540 ret = 0;
05c7145d 541 nvkm_namedb_put(bind);
3d61b967
BS
542 }
543
544out:
6189f1b0 545 spin_unlock_irqrestore(&fifo->base.lock, flags);
3d61b967
BS
546 return ret;
547}
548
05c7145d
BS
549static const struct nvkm_enum
550gk104_fifo_bind_reason[] = {
56b2f68c
BS
551 { 0x01, "BIND_NOT_UNBOUND" },
552 { 0x02, "SNOOP_WITHOUT_BAR1" },
553 { 0x03, "UNBIND_WHILE_RUNNING" },
554 { 0x05, "INVALID_RUNLIST" },
555 { 0x06, "INVALID_CTX_TGT" },
556 { 0x0b, "UNBIND_WHILE_PARKED" },
557 {}
558};
559
560static void
6189f1b0 561gk104_fifo_intr_bind(struct gk104_fifo *fifo)
56b2f68c 562{
e5c5e4f5
BS
563 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
564 struct nvkm_device *device = subdev->device;
87744403 565 u32 intr = nvkm_rd32(device, 0x00252c);
56b2f68c 566 u32 code = intr & 0x000000ff;
e5c5e4f5
BS
567 const struct nvkm_enum *en =
568 nvkm_enum_find(gk104_fifo_bind_reason, code);
56b2f68c 569
e5c5e4f5 570 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
56b2f68c
BS
571}
572
05c7145d
BS
573static const struct nvkm_enum
574gk104_fifo_sched_reason[] = {
e9fb9805
BS
575 { 0x0a, "CTXSW_TIMEOUT" },
576 {}
577};
578
129dcca7 579static void
6189f1b0 580gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
129dcca7 581{
87744403 582 struct nvkm_device *device = fifo->base.engine.subdev.device;
05c7145d
BS
583 struct nvkm_engine *engine;
584 struct gk104_fifo_chan *chan;
129dcca7
BS
585 u32 engn;
586
587 for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
87744403 588 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
129dcca7
BS
589 u32 busy = (stat & 0x80000000);
590 u32 next = (stat & 0x07ff0000) >> 16;
591 u32 chsw = (stat & 0x00008000);
592 u32 save = (stat & 0x00004000);
593 u32 load = (stat & 0x00002000);
594 u32 prev = (stat & 0x000007ff);
595 u32 chid = load ? next : prev;
596 (void)save;
597
598 if (busy && chsw) {
6189f1b0 599 if (!(chan = (void *)fifo->base.channel[chid]))
129dcca7 600 continue;
6189f1b0 601 if (!(engine = gk104_fifo_engine(fifo, engn)))
129dcca7 602 continue;
6189f1b0 603 gk104_fifo_recover(fifo, engine, chan);
129dcca7
BS
604 }
605 }
606}
607
885f3ced 608static void
6189f1b0 609gk104_fifo_intr_sched(struct gk104_fifo *fifo)
885f3ced 610{
e5c5e4f5
BS
611 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
612 struct nvkm_device *device = subdev->device;
87744403 613 u32 intr = nvkm_rd32(device, 0x00254c);
885f3ced 614 u32 code = intr & 0x000000ff;
e5c5e4f5
BS
615 const struct nvkm_enum *en =
616 nvkm_enum_find(gk104_fifo_sched_reason, code);
0a7760e0 617
e5c5e4f5 618 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
129dcca7
BS
619
620 switch (code) {
621 case 0x0a:
6189f1b0 622 gk104_fifo_intr_sched_ctxsw(fifo);
129dcca7
BS
623 break;
624 default:
625 break;
626 }
885f3ced
BS
627}
628
629static void
6189f1b0 630gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
885f3ced 631{
e5c5e4f5
BS
632 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
633 struct nvkm_device *device = subdev->device;
87744403 634 u32 stat = nvkm_rd32(device, 0x00256c);
e5c5e4f5 635 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
87744403 636 nvkm_wr32(device, 0x00256c, stat);
885f3ced
BS
637}
638
639static void
6189f1b0 640gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
885f3ced 641{
e5c5e4f5
BS
642 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
643 struct nvkm_device *device = subdev->device;
87744403 644 u32 stat = nvkm_rd32(device, 0x00259c);
e5c5e4f5 645 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
885f3ced
BS
646}
647
05c7145d
BS
648static const struct nvkm_enum
649gk104_fifo_fault_engine[] = {
e1b6b14a 650 { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
885f3ced 651 { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
cb1567c2
BS
652 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
653 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
e1b6b14a
BS
654 { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
655 { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
656 { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
eccf7e8a 657 { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
fd8666f7 658 { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
e1b6b14a 659 { 0x13, "PERF" },
37a5d028 660 { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
aedf24ff
BS
661 { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
662 { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
e1b6b14a 663 { 0x17, "PMU" },
bd8369ec 664 { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
aedf24ff 665 { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
5132f377
BS
666 {}
667};
668
05c7145d
BS
669static const struct nvkm_enum
670gk104_fifo_fault_reason[] = {
e1b6b14a
BS
671 { 0x00, "PDE" },
672 { 0x01, "PDE_SIZE" },
673 { 0x02, "PTE" },
674 { 0x03, "VA_LIMIT_VIOLATION" },
675 { 0x04, "UNBOUND_INST_BLOCK" },
676 { 0x05, "PRIV_VIOLATION" },
677 { 0x06, "RO_VIOLATION" },
678 { 0x07, "WO_VIOLATION" },
679 { 0x08, "PITCH_MASK_VIOLATION" },
680 { 0x09, "WORK_CREATION" },
681 { 0x0a, "UNSUPPORTED_APERTURE" },
682 { 0x0b, "COMPRESSION_FAILURE" },
683 { 0x0c, "UNSUPPORTED_KIND" },
684 { 0x0d, "REGION_VIOLATION" },
685 { 0x0e, "BOTH_PTES_VALID" },
686 { 0x0f, "INFO_TYPE_POISONED" },
5132f377
BS
687 {}
688};
689
05c7145d
BS
690static const struct nvkm_enum
691gk104_fifo_fault_hubclient[] = {
e1b6b14a
BS
692 { 0x00, "VIP" },
693 { 0x01, "CE0" },
694 { 0x02, "CE1" },
695 { 0x03, "DNISO" },
696 { 0x04, "FE" },
697 { 0x05, "FECS" },
698 { 0x06, "HOST" },
699 { 0x07, "HOST_CPU" },
700 { 0x08, "HOST_CPU_NB" },
701 { 0x09, "ISO" },
702 { 0x0a, "MMU" },
703 { 0x0b, "MSPDEC" },
704 { 0x0c, "MSPPP" },
705 { 0x0d, "MSVLD" },
706 { 0x0e, "NISO" },
707 { 0x0f, "P2P" },
708 { 0x10, "PD" },
709 { 0x11, "PERF" },
710 { 0x12, "PMU" },
711 { 0x13, "RASTERTWOD" },
712 { 0x14, "SCC" },
713 { 0x15, "SCC_NB" },
714 { 0x16, "SEC" },
715 { 0x17, "SSYNC" },
aedf24ff 716 { 0x18, "GR_CE" },
e1b6b14a
BS
717 { 0x19, "CE2" },
718 { 0x1a, "XV" },
719 { 0x1b, "MMU_NB" },
720 { 0x1c, "MSENC" },
721 { 0x1d, "DFALCON" },
722 { 0x1e, "SKED" },
723 { 0x1f, "AFALCON" },
5132f377
BS
724 {}
725};
726
05c7145d
BS
727static const struct nvkm_enum
728gk104_fifo_fault_gpcclient[] = {
e1b6b14a
BS
729 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
730 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
731 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
732 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
733 { 0x0c, "RAST" },
734 { 0x0d, "GCC" },
735 { 0x0e, "GPCCS" },
736 { 0x0f, "PROP_0" },
737 { 0x10, "PROP_1" },
738 { 0x11, "PROP_2" },
739 { 0x12, "PROP_3" },
740 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
741 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
742 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
743 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
744 { 0x1f, "GPM" },
745 { 0x20, "LTP_UTLB_0" },
746 { 0x21, "LTP_UTLB_1" },
747 { 0x22, "LTP_UTLB_2" },
748 { 0x23, "LTP_UTLB_3" },
749 { 0x24, "GPC_RGG_UTLB" },
5132f377
BS
750 {}
751};
752
e9fb9805 753static void
6189f1b0 754gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
5132f377 755{
e5c5e4f5
BS
756 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
757 struct nvkm_device *device = subdev->device;
87744403
BS
758 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
759 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
760 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
761 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
885f3ced 762 u32 gpc = (stat & 0x1f000000) >> 24;
5132f377 763 u32 client = (stat & 0x00001f00) >> 8;
885f3ced
BS
764 u32 write = (stat & 0x00000080);
765 u32 hub = (stat & 0x00000040);
766 u32 reason = (stat & 0x0000000f);
05c7145d
BS
767 struct nvkm_object *engctx = NULL, *object;
768 struct nvkm_engine *engine = NULL;
769 const struct nvkm_enum *er, *eu, *ec;
e5c5e4f5 770 char gpcid[8] = "";
885f3ced 771
05c7145d 772 er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
05c7145d 773 eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
e5c5e4f5
BS
774 if (hub) {
775 ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
776 } else {
777 ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
778 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
779 }
780
885f3ced
BS
781 if (eu) {
782 switch (eu->data2) {
783 case NVDEV_SUBDEV_BAR:
87744403 784 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
885f3ced
BS
785 break;
786 case NVDEV_SUBDEV_INSTMEM:
87744403 787 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
885f3ced
BS
788 break;
789 case NVDEV_ENGINE_IFB:
87744403 790 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
885f3ced
BS
791 break;
792 default:
6189f1b0 793 engine = nvkm_engine(fifo, eu->data2);
885f3ced 794 if (engine)
05c7145d 795 engctx = nvkm_engctx_get(engine, inst);
885f3ced 796 break;
cb1567c2 797 }
885f3ced
BS
798 }
799
e5c5e4f5
BS
800 nvkm_error(subdev,
801 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
802 "reason %02x [%s] on channel %d [%010llx %s]\n",
803 write ? "write" : "read", (u64)vahi << 32 | valo,
804 unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
805 reason, er ? er->name : "", -1, (u64)inst << 12,
806 nvkm_client_name(engctx));
93260d3c 807
98d1e317
BS
808 object = engctx;
809 while (object) {
810 switch (nv_mclass(object)) {
bbf8906b 811 case KEPLER_CHANNEL_GPFIFO_A:
89025bd4 812 case MAXWELL_CHANNEL_GPFIFO_A:
6189f1b0 813 gk104_fifo_recover(fifo, engine, (void *)object);
98d1e317
BS
814 break;
815 }
816 object = object->parent;
817 }
818
05c7145d 819 nvkm_engctx_put(engctx);
5132f377
BS
820}
821
05c7145d 822static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
3d61b967
BS
823 { 0x00000001, "MEMREQ" },
824 { 0x00000002, "MEMACK_TIMEOUT" },
825 { 0x00000004, "MEMACK_EXTRA" },
826 { 0x00000008, "MEMDAT_TIMEOUT" },
827 { 0x00000010, "MEMDAT_EXTRA" },
828 { 0x00000020, "MEMFLUSH" },
829 { 0x00000040, "MEMOP" },
830 { 0x00000080, "LBCONNECT" },
831 { 0x00000100, "LBREQ" },
832 { 0x00000200, "LBACK_TIMEOUT" },
833 { 0x00000400, "LBACK_EXTRA" },
834 { 0x00000800, "LBDAT_TIMEOUT" },
835 { 0x00001000, "LBDAT_EXTRA" },
836 { 0x00002000, "GPFIFO" },
837 { 0x00004000, "GPPTR" },
838 { 0x00008000, "GPENTRY" },
839 { 0x00010000, "GPCRC" },
840 { 0x00020000, "PBPTR" },
841 { 0x00040000, "PBENTRY" },
842 { 0x00080000, "PBCRC" },
843 { 0x00100000, "XBARCONNECT" },
844 { 0x00200000, "METHOD" },
845 { 0x00400000, "METHODCRC" },
846 { 0x00800000, "DEVICE" },
847 { 0x02000000, "SEMAPHORE" },
848 { 0x04000000, "ACQUIRE" },
849 { 0x08000000, "PRI" },
850 { 0x20000000, "NO_CTXSW_SEG" },
851 { 0x40000000, "PBSEG" },
852 { 0x80000000, "SIGNATURE" },
853 {}
854};
e2b34fa0 855
5132f377 856static void
6189f1b0 857gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
5132f377 858{
e5c5e4f5
BS
859 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
860 struct nvkm_device *device = subdev->device;
87744403
BS
861 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
862 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
863 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
864 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
865 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
ebb945a9 866 u32 subc = (addr & 0x00070000) >> 16;
5132f377 867 u32 mthd = (addr & 0x00003ffc);
e2b34fa0 868 u32 show = stat;
e5c5e4f5 869 char msg[128];
e2b34fa0 870
ebb945a9 871 if (stat & 0x00800000) {
6189f1b0 872 if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
ebb945a9 873 show &= ~0x00800000;
87744403 874 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
ebb945a9
BS
875 }
876
e2b34fa0 877 if (show) {
e5c5e4f5
BS
878 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
879 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
880 "mthd %04x data %08x\n",
881 unit, show, msg, chid,
882 nvkm_client_name_for_fifo_chid(&fifo->base, chid),
883 subc, mthd, data);
e2b34fa0 884 }
5132f377 885
87744403 886 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
5132f377
BS
887}
888
/* Bit names for the PBDMA_INTR_1 status register (HCE: host copy engine
 * related errors), decoded by gk104_fifo_intr_pbdma_1() via nvkm_snprintbf().
 */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};
897
898static void
6189f1b0 899gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
70b2cc8e 900{
e5c5e4f5
BS
901 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
902 struct nvkm_device *device = subdev->device;
87744403
BS
903 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
904 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
905 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
e5c5e4f5 906 char msg[128];
70b2cc8e
BS
907
908 if (stat) {
e5c5e4f5
BS
909 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
910 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
911 unit, stat, msg, chid,
912 nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
913 nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
70b2cc8e
BS
914 }
915
87744403 916 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
70b2cc8e
BS
917}
918
138b873f 919static void
6189f1b0 920gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
138b873f 921{
87744403
BS
922 struct nvkm_device *device = fifo->base.engine.subdev.device;
923 u32 mask = nvkm_rd32(device, 0x002a00);
138b873f
BS
924 while (mask) {
925 u32 engn = __ffs(mask);
6189f1b0 926 wake_up(&fifo->engine[engn].wait);
87744403 927 nvkm_wr32(device, 0x002a00, 1 << engn);
138b873f
BS
928 mask &= ~(1 << engn);
929 }
930}
931
/* Non-stall engine interrupt: forward to the FIFO uevent machinery so
 * userspace waiters (e.g. on semaphore releases) are notified.
 */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}
937
/* Top-level PFIFO interrupt handler.  Reads the masked status word and
 * dispatches each known bit to its specific handler, acking (write-one-
 * to-clear at 0x002100) and clearing the local copy as it goes.  Any
 * leftover bits are unknown: they are reported once and then masked off
 * in 0x002140 so they can't interrupt-storm.
 */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);	/* INTR_EN */
	u32 stat = nvkm_rd32(device, 0x002100) & mask;	/* INTR */

	/* BIND_ERROR */
	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	/* SCHED_ERROR (may trigger channel recovery) */
	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	/* CHSW_ERROR (channel switch) */
	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	/* DROPPED_MMU_FAULT */
	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	/* MMU_FAULT: per-unit pending bits live in 0x00259c */
	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	/* PBDMA_INTR: per-unit pending bits live in 0x0025a0 */
	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	/* RUNLIST update completion */
	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	/* Non-stall engine interrupt -> uevent */
	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	/* Unknown bits: log, mask so they don't storm, then ack. */
	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
c420b2dc 1028
9bd2ddba 1029static void
05c7145d 1030gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
9bd2ddba 1031{
05c7145d 1032 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
87744403
BS
1033 struct nvkm_device *device = fifo->engine.subdev.device;
1034 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
9bd2ddba
BS
1035}
1036
1037static void
05c7145d 1038gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
9bd2ddba 1039{
05c7145d 1040 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
87744403
BS
1041 struct nvkm_device *device = fifo->engine.subdev.device;
1042 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
9bd2ddba
BS
1043}
1044
/* Event ops for the FIFO non-stall uevent: the shared nvkm ctor plus
 * init/fini hooks that gate the hardware interrupt enable bit.
 */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};
1051
649ec925 1052int
05c7145d 1053gk104_fifo_fini(struct nvkm_object *object, bool suspend)
649ec925 1054{
6189f1b0 1055 struct gk104_fifo *fifo = (void *)object;
87744403 1056 struct nvkm_device *device = fifo->base.engine.subdev.device;
649ec925
BS
1057 int ret;
1058
6189f1b0 1059 ret = nvkm_fifo_fini(&fifo->base, suspend);
649ec925
BS
1060 if (ret)
1061 return ret;
1062
1063 /* allow mmu fault interrupts, even when we're not using fifo */
87744403 1064 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
649ec925
BS
1065 return 0;
1066}
1067
a763951a 1068int
05c7145d 1069gk104_fifo_init(struct nvkm_object *object)
a763951a 1070{
6189f1b0 1071 struct gk104_fifo *fifo = (void *)object;
e5c5e4f5
BS
1072 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
1073 struct nvkm_device *device = subdev->device;
a763951a
BS
1074 int ret, i;
1075
6189f1b0 1076 ret = nvkm_fifo_init(&fifo->base);
a763951a
BS
1077 if (ret)
1078 return ret;
1079
39b05542 1080 /* enable all available PBDMA units */
87744403
BS
1081 nvkm_wr32(device, 0x000204, 0xffffffff);
1082 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
e5c5e4f5 1083 nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
a763951a 1084
39b05542 1085 /* PBDMA[n] */
6189f1b0 1086 for (i = 0; i < fifo->spoon_nr; i++) {
87744403
BS
1087 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
1088 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
1089 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
a763951a
BS
1090 }
1091
70b2cc8e 1092 /* PBDMA[n].HCE */
6189f1b0 1093 for (i = 0; i < fifo->spoon_nr; i++) {
87744403
BS
1094 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
1095 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
70b2cc8e
BS
1096 }
1097
87744403 1098 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
a763951a 1099
87744403
BS
1100 nvkm_wr32(device, 0x002100, 0xffffffff);
1101 nvkm_wr32(device, 0x002140, 0x7fffffff);
a763951a
BS
1102 return 0;
1103}
1104
1105void
05c7145d 1106gk104_fifo_dtor(struct nvkm_object *object)
a763951a 1107{
6189f1b0 1108 struct gk104_fifo *fifo = (void *)object;
a763951a
BS
1109 int i;
1110
6189f1b0
BS
1111 nvkm_gpuobj_unmap(&fifo->user.bar);
1112 nvkm_gpuobj_ref(NULL, &fifo->user.mem);
a763951a
BS
1113
1114 for (i = 0; i < FIFO_ENGINE_NR; i++) {
6189f1b0
BS
1115 nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
1116 nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
a763951a
BS
1117 }
1118
6189f1b0 1119 nvkm_fifo_destroy(&fifo->base);
a763951a
BS
1120}
1121
/* Constructor: allocate per-engine runlist double buffers, the USERD
 * memory + BAR mapping, and the non-stall uevent, then wire up the
 * subdev interrupt handler and object classes.  Early-return on error
 * is safe because the core invokes gk104_fifo_dtor() to release any
 * partially-allocated state.
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	/* Channel ids run 0..channels-1. */
	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* Deferred recovery work, scheduled from fault handling. */
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		/* Two runlist buffers per engine so updates can ping-pong. */
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		/* Woken by gk104_fifo_intr_runlist() on update completion. */
		init_waitqueue_head(&fifo->engine[i].wait);
	}

	/* USERD: 0x200 bytes per channel, zeroed. */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	/* Non-stall interrupt event, one bit / one index. */
	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;	/* PMC enable bit for PFIFO */
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}
1173
/* Object class for the GK104 (NVE0) FIFO engine: lifecycle hooks plus
 * the channel count exposed through gk104_fifo_impl.
 */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;