/* drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c */
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include <core/client.h>
26 #include <core/handle.h>
27 #include <core/namedb.h>
28 #include <core/gpuobj.h>
29 #include <core/engctx.h>
30 #include <core/event.h>
31 #include <core/class.h>
32 #include <core/enum.h>
33
34 #include <subdev/timer.h>
35 #include <subdev/bar.h>
36 #include <subdev/fb.h>
37 #include <subdev/vm.h>
38
39 #include <engine/dmaobj.h>
40
41 #include "nve0.h"
42
/* Table of engines that may appear on an NVE0 runlist.  For each engine,
 * "mask" is the set of engine context bits a channel bound to it may use;
 * the _() helper always includes the engine's own bit in the mask. */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_COPY2)),
	_(NVDEV_ENGINE_VP      , 0),
	_(NVDEV_ENGINE_PPP     , 0),
	_(NVDEV_ENGINE_BSP     , 0),
	_(NVDEV_ENGINE_COPY0   , 0),
	_(NVDEV_ENGINE_COPY1   , 0),
	_(NVDEV_ENGINE_VENC    , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
59
/* Per-engine runlist state: two buffers, alternated on each update. */
struct nve0_fifo_engn {
	struct nouveau_gpuobj *runlist[2];
	int cur_runlist;	/* index of the buffer last handed to hw */
};
64
/* Private PFIFO engine state for NVE0 (Kepler). */
struct nve0_fifo_priv {
	struct nouveau_fifo base;
	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nouveau_gpuobj *mem;	/* backing store for per-channel USERD */
		struct nouveau_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;	/* number of PBDMA units present */
};
74
/* Per-channel-group context: instance block plus its page directory/VM. */
struct nve0_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *pgd;
	struct nouveau_vm *vm;
};
80
/* Per-channel state; "engine" indexes fifo_engine[] (i.e. the runlist). */
struct nve0_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 engine;
};
85
86 /*******************************************************************************
87 * FIFO channel objects
88 ******************************************************************************/
89
/*
 * Rebuild and commit the runlist for one engine.
 *
 * Alternates between the two runlist buffers, fills the inactive one with
 * every channel whose control register (0x800004 + chid*8) matches this
 * engine and has its enable bit set, then points the hardware at the new
 * buffer and waits for the pending bit to clear.
 */
static void
nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nve0_fifo_engn *engn = &priv->engine[engine];
	struct nouveau_gpuobj *cur;
	u32 match = (engine << 16) | 0x00000001;	/* engine id + enabled */
	int i, p;

	mutex_lock(&nv_subdev(priv)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* runlist entries are 8 bytes: channel id, then a zero word */
	for (i = 0, p = 0; i < priv->base.max; i++) {
		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
		if (ctrl != match)
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000000);
		p += 8;
	}
	bar->flush(bar);

	/* submit: base address, then engine + entry count */
	nv_wr32(priv, 0x002270, cur->addr >> 12);
	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
	if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
		nv_error(priv, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(priv)->mutex);
}
119
/*
 * Bind an engine context to a channel: map the engctx into the channel's
 * VM (once) and write its address into the engine's slot in the channel
 * instance block.  SW and the copy engines need no slot and return early.
 */
static int
nve0_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nouveau_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1:
	case NVDEV_ENGINE_COPY2:
		return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* first attach: map the context object into the channel VM */
	if (!ectx->vma.node) {
		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					    NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* low word carries a valid/flags nibble (|4) alongside the address */
	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}
158
/*
 * Detach an engine context from a channel: kick the channel off the
 * hardware (via 0x002634) and clear its engine slot in the instance
 * block.  Returns -EBUSY on kick timeout during suspend, since resuming
 * with the context still active would be unsafe.
 */
static int
nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)parent->engine;
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nve0_fifo_chan *chan = (void *)parent;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1:
	case NVDEV_ENGINE_COPY2: addr = 0x0000; break;	/* kick only, no slot */
	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* write chid to the kick register, wait for hw to echo it back */
	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nouveau_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}
198
/*
 * Channel constructor: select the runlist engine from the client's
 * request mask, create the base fifo channel, zero the channel's USERD
 * slot and initialise the channel instance block.
 */
static int
nve0_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)engine;
	struct nve0_fifo_base *base = (void *)parent;
	struct nve0_fifo_chan *chan;
	struct nve0_channel_ind_class *args = data;
	u64 usermem, ioffset, ilength;
	int ret, i;

	if (size < sizeof(*args))
		return -EINVAL;

	/* pick the first requested engine that exists on this chipset,
	 * narrowing args->engine to the single selected bit */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->engine & (1 << i)) {
			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
				args->engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nv_error(priv, "unsupported engines 0x%08x\n", args->engine);
		return -ENODEV;
	}

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
					  priv->user.bar.offset, 0x200,
					  args->pushbuf,
					  fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
	chan->engine = i;

	/* each channel owns a 0x200-byte USERD slot within priv->user.mem */
	usermem = chan->base.chid * 0x200;
	ioffset = args->ioffset;
	ilength = order_base_2(args->ilength / 8);	/* entries, log2 */

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	/* instance block: USERD address at 0x08/0x0c, indirect buffer
	 * offset/size at 0x48/0x4c; remaining values are fixed hardware
	 * init constants (presumably per-chipset defaults — unverified) */
	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
266
/*
 * Bring a channel online: program its control register with the engine
 * and instance-block address, enable it, and rebuild the runlist.
 */
static int
nve0_fifo_chan_init(struct nouveau_object *object)
{
	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
	struct nve0_fifo_priv *priv = (void *)object->engine;
	struct nve0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
	/* NOTE(review): the 0x400 enable bit is set both before and after
	 * the runlist update; this matches the original code, intent of
	 * the second write is unclear — confirm against hw docs */
	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	nve0_fifo_runlist_update(priv, chan->engine);
	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	return 0;
}
287
/*
 * Take a channel offline: set its disable bit so the runlist rebuild
 * drops it, then clear the channel's instance pointer register.
 */
static int
nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nve0_fifo_priv *priv = (void *)object->engine;
	struct nve0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
	nve0_fifo_runlist_update(priv, chan->engine);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
301
/* Object functions for NVE0 fifo channel objects. */
static struct nouveau_ofuncs
nve0_fifo_ofuncs = {
	.ctor = nve0_fifo_chan_ctor,
	.dtor = _nouveau_fifo_channel_dtor,
	.init = nve0_fifo_chan_init,
	.fini = nve0_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
311
/* Channel classes exposed to clients by this engine. */
static struct nouveau_oclass
nve0_fifo_sclass[] = {
	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
	{}
};
317
318 /*******************************************************************************
319 * FIFO context - instmem heap and vm setup
320 ******************************************************************************/
321
/*
 * Fifo context constructor: allocate the instance block, create the page
 * directory, point the instance block at it (with a 40-bit VA limit in
 * 0x208/0x20c), and reference the client's VM through the new pgd.
 */
static int
nve0_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nve0_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
					  0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
				 &base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
353
354 static void
355 nve0_fifo_context_dtor(struct nouveau_object *object)
356 {
357 struct nve0_fifo_base *base = (void *)object;
358 nouveau_vm_ref(NULL, &base->vm, base->pgd);
359 nouveau_gpuobj_ref(NULL, &base->pgd);
360 nouveau_fifo_context_destroy(&base->base);
361 }
362
/* Engine-context class for NVE0 fifo. */
static struct nouveau_oclass
nve0_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_context_ctor,
		.dtor = nve0_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
375
376 /*******************************************************************************
377 * PFIFO engine
378 ******************************************************************************/
379
/* Decode table for SCHED_ERROR codes (register 0x00254c). */
static const struct nouveau_enum nve0_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
384
/* MMU fault source units; data2 maps a unit back to its engine/subdev. */
static const struct nouveau_enum nve0_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB" },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
	{}
};
404
/* MMU fault reason codes (low nibble of the fault status register). */
static const struct nouveau_enum nve0_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};
424
/* Client ids for faults originating from HUB units. */
static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_COPY" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};
460
/* Client ids for faults originating from GPC units. */
static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};
485
/* Bit names for the PBDMA interrupt status register (0x040108+). */
static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
519
/* Report a scheduler error; the low byte of 0x00254c is the reason code. */
static void
nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x00254c);
	u32 code = intr & 0x000000ff;
	nv_error(priv, "SCHED_ERROR [");
	nouveau_enum_print(nve0_fifo_sched_reason, code);
	pr_cont("]\n");
}
529
530 static void
531 nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
532 {
533 u32 stat = nv_rd32(priv, 0x00256c);
534 nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
535 nv_wr32(priv, 0x00256c, stat);
536 }
537
538 static void
539 nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
540 {
541 u32 stat = nv_rd32(priv, 0x00259c);
542 nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
543 }
544
/*
 * Decode and report an MMU fault from one fault unit: read the fault
 * info registers, print the decoded reason/unit/client, and try to name
 * the owning channel.  BAR1/BAR3 faults are additionally "acknowledged"
 * by a dummy mask of the respective bind register.
 */
static void
nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
{
	u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));	/* instance >> 12 */
	u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));	/* fault VA low */
	u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));	/* fault VA high */
	u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));	/* fault status */
	u32 client = (stat & 0x00001f00) >> 8;
	struct nouveau_engine *engine = NULL;
	struct nouveau_object *engctx = NULL;
	const struct nouveau_enum *en;
	const char *name = "unknown";

	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
		       "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
	pr_cont("] from ");
	en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
	/* bit 6 distinguishes HUB clients from GPC clients */
	if (stat & 0x00000040) {
		pr_cont("/");
		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
	} else {
		pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
	}

	if (en && en->data2) {
		if (en->data2 == NVDEV_SUBDEV_BAR) {
			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
			name = "BAR1";
		} else
		if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
			name = "BAR3";
		} else {
			/* look up the channel by instance to name the client */
			engine = nouveau_engine(priv, en->data2);
			if (engine) {
				engctx = nouveau_engctx_get(engine, inst);
				name = nouveau_client_name(engctx);
			}
		}
	}
	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);

	nouveau_engctx_put(engctx);
}
591
/*
 * Forward a software method to the channel's SW class object (0x906e).
 * Returns 0 if the method was handled, -EINVAL otherwise.  Runs under
 * the fifo spinlock so the channel cannot disappear mid-call.
 */
static int
nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct nve0_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		/* mthd == 0 is treated as handled without calling down */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nouveau_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}
617
/*
 * Handle an interrupt from one PBDMA unit: attempt to satisfy DEVICE
 * (software method) interrupts via the SW class, report anything left,
 * then acknowledge the method slot and the interrupt status.
 */
static void
nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
{
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	/* DEVICE interrupt: may be a software method we can execute */
	if (stat & 0x00800000) {
		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(priv, "PBDMA%d:", unit);
		nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
		pr_cont("\n");
		nv_error(priv,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
648
649 static void
650 nve0_fifo_intr(struct nouveau_subdev *subdev)
651 {
652 struct nve0_fifo_priv *priv = (void *)subdev;
653 u32 mask = nv_rd32(priv, 0x002140);
654 u32 stat = nv_rd32(priv, 0x002100) & mask;
655
656 if (stat & 0x00000001) {
657 u32 stat = nv_rd32(priv, 0x00252c);
658 nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
659 nv_wr32(priv, 0x002100, 0x00000001);
660 stat &= ~0x00000001;
661 }
662
663 if (stat & 0x00000010) {
664 nv_error(priv, "PIO_ERROR\n");
665 nv_wr32(priv, 0x002100, 0x00000010);
666 stat &= ~0x00000010;
667 }
668
669 if (stat & 0x00000100) {
670 nve0_fifo_intr_sched(priv);
671 nv_wr32(priv, 0x002100, 0x00000100);
672 stat &= ~0x00000100;
673 }
674
675 if (stat & 0x00010000) {
676 nve0_fifo_intr_chsw(priv);
677 nv_wr32(priv, 0x002100, 0x00010000);
678 stat &= ~0x00010000;
679 }
680
681 if (stat & 0x00800000) {
682 nv_error(priv, "FB_FLUSH_TIMEOUT\n");
683 nv_wr32(priv, 0x002100, 0x00800000);
684 stat &= ~0x00800000;
685 }
686
687 if (stat & 0x01000000) {
688 nv_error(priv, "LB_ERROR\n");
689 nv_wr32(priv, 0x002100, 0x01000000);
690 stat &= ~0x01000000;
691 }
692
693 if (stat & 0x08000000) {
694 nve0_fifo_intr_dropped_fault(priv);
695 nv_wr32(priv, 0x002100, 0x08000000);
696 stat &= ~0x08000000;
697 }
698
699 if (stat & 0x10000000) {
700 u32 units = nv_rd32(priv, 0x00259c);
701 u32 u = units;
702
703 while (u) {
704 int i = ffs(u) - 1;
705 nve0_fifo_intr_fault(priv, i);
706 u &= ~(1 << i);
707 }
708
709 nv_wr32(priv, 0x00259c, units);
710 stat &= ~0x10000000;
711 }
712
713 if (stat & 0x20000000) {
714 u32 mask = nv_rd32(priv, 0x0025a0);
715 u32 temp = mask;
716
717 while (temp) {
718 u32 unit = ffs(temp) - 1;
719 nve0_fifo_intr_pbdma(priv, unit);
720 temp &= ~(1 << unit);
721 }
722
723 nv_wr32(priv, 0x0025a0, mask);
724 stat &= ~0x20000000;
725 }
726
727 if (stat & 0x40000000) {
728 u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
729
730 while (mask) {
731 u32 engn = ffs(mask) - 1;
732 /* runlist event, not currently used */
733 mask &= ~(1 << engn);
734 }
735
736 stat &= ~0x40000000;
737 }
738
739 if (stat & 0x80000000) {
740 nouveau_event_trigger(priv->base.uevent, 0);
741 nv_wr32(priv, 0x002100, 0x80000000);
742 stat &= ~0x80000000;
743 }
744
745 if (stat) {
746 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
747 nv_wr32(priv, 0x002100, stat);
748 nv_wr32(priv, 0x002140, 0);
749 }
750 }
751
752 static void
753 nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
754 {
755 struct nve0_fifo_priv *priv = event->priv;
756 nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
757 }
758
759 static void
760 nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
761 {
762 struct nve0_fifo_priv *priv = event->priv;
763 nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
764 }
765
766 int
767 nve0_fifo_fini(struct nouveau_object *object, bool suspend)
768 {
769 struct nve0_fifo_priv *priv = (void *)object;
770 int ret;
771
772 ret = nouveau_fifo_fini(&priv->base, suspend);
773 if (ret)
774 return ret;
775
776 /* allow mmu fault interrupts, even when we're not using fifo */
777 nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
778 return 0;
779 }
780
/*
 * Engine init: enable all PBDMA units, program per-unit interrupt
 * enables, point the hardware at the USERD area, and unmask the fifo
 * interrupt sources we handle.
 */
int
nve0_fifo_init(struct nouveau_object *object)
{
	struct nve0_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nv_wr32(priv, 0x000204, 0xffffffff);
	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* USERD base address (4KiB-aligned), with enable bit */
	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	/* clear then unmask top-level interrupt sources */
	nv_wr32(priv, 0x002a00, 0xffffffff);
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0x3fffffff);
	return 0;
}
810
811 void
812 nve0_fifo_dtor(struct nouveau_object *object)
813 {
814 struct nve0_fifo_priv *priv = (void *)object;
815 int i;
816
817 nouveau_gpuobj_unmap(&priv->user.bar);
818 nouveau_gpuobj_ref(NULL, &priv->user.mem);
819
820 for (i = 0; i < FIFO_ENGINE_NR; i++) {
821 nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
822 nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
823 }
824
825 nouveau_fifo_destroy(&priv->base);
826 }
827
/*
 * Engine constructor: create the base fifo (channel ids 0..channels-1),
 * allocate a pair of runlist buffers per engine, allocate and map the
 * USERD backing store, and hook up events and interrupt handling.
 */
int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nve0_fifo_impl *impl = (void *)oclass;
	struct nve0_fifo_priv *priv;
	int ret, i;

	ret = nouveau_fifo_create(parent, engine, oclass, 0,
				  impl->channels - 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
					 0, &priv->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
					 0, &priv->engine[i].runlist[1]);
		if (ret)
			return ret;
	}

	/* one 0x200-byte USERD slot per possible channel */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
				 &priv->user.bar);
	if (ret)
		return ret;

	priv->base.uevent->enable = nve0_fifo_uevent_enable;
	priv->base.uevent->disable = nve0_fifo_uevent_disable;
	priv->base.uevent->priv = priv;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nve0_fifo_intr;
	nv_engine(priv)->cclass = &nve0_fifo_cclass;
	nv_engine(priv)->sclass = nve0_fifo_sclass;
	return 0;
}
875
/* Public class descriptor for the NVE0 fifo engine (4096 channels). */
struct nouveau_oclass *
nve0_fifo_oclass = &(struct nve0_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_ctor,
		.dtor = nve0_fifo_dtor,
		.init = nve0_fifo_init,
		.fini = nve0_fifo_fini,
	},
	.channels = 4096,
}.base;