/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <core/client.h>
26 #include <core/handle.h>
27 #include <core/namedb.h>
28 #include <core/gpuobj.h>
29 #include <core/engctx.h>
30 #include <core/event.h>
31 #include <core/class.h>
32 #include <core/enum.h>
34 #include <subdev/timer.h>
35 #include <subdev/bar.h>
36 #include <subdev/fb.h>
37 #include <subdev/vm.h>
39 #include <engine/dmaobj.h>
43 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
48 _(NVDEV_ENGINE_GR
, (1ULL << NVDEV_ENGINE_SW
) |
49 (1ULL << NVDEV_ENGINE_COPY2
)),
50 _(NVDEV_ENGINE_VP
, 0),
51 _(NVDEV_ENGINE_PPP
, 0),
52 _(NVDEV_ENGINE_BSP
, 0),
53 _(NVDEV_ENGINE_COPY0
, 0),
54 _(NVDEV_ENGINE_COPY1
, 0),
55 _(NVDEV_ENGINE_VENC
, 0),
58 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
60 struct nve0_fifo_engn
{
61 struct nouveau_gpuobj
*runlist
[2];
65 struct nve0_fifo_priv
{
66 struct nouveau_fifo base
;
67 struct nve0_fifo_engn engine
[FIFO_ENGINE_NR
];
69 struct nouveau_gpuobj
*mem
;
70 struct nouveau_vma bar
;
75 struct nve0_fifo_base
{
76 struct nouveau_fifo_base base
;
77 struct nouveau_gpuobj
*pgd
;
78 struct nouveau_vm
*vm
;
81 struct nve0_fifo_chan
{
82 struct nouveau_fifo_chan base
;
86 /*******************************************************************************
87 * FIFO channel objects
88 ******************************************************************************/
91 nve0_fifo_runlist_update(struct nve0_fifo_priv
*priv
, u32 engine
)
93 struct nouveau_bar
*bar
= nouveau_bar(priv
);
94 struct nve0_fifo_engn
*engn
= &priv
->engine
[engine
];
95 struct nouveau_gpuobj
*cur
;
96 u32 match
= (engine
<< 16) | 0x00000001;
99 mutex_lock(&nv_subdev(priv
)->mutex
);
100 cur
= engn
->runlist
[engn
->cur_runlist
];
101 engn
->cur_runlist
= !engn
->cur_runlist
;
103 for (i
= 0, p
= 0; i
< priv
->base
.max
; i
++) {
104 u32 ctrl
= nv_rd32(priv
, 0x800004 + (i
* 8)) & 0x001f0001;
107 nv_wo32(cur
, p
+ 0, i
);
108 nv_wo32(cur
, p
+ 4, 0x00000000);
113 nv_wr32(priv
, 0x002270, cur
->addr
>> 12);
114 nv_wr32(priv
, 0x002274, (engine
<< 20) | (p
>> 3));
115 if (!nv_wait(priv
, 0x002284 + (engine
* 8), 0x00100000, 0x00000000))
116 nv_error(priv
, "runlist %d update timeout\n", engine
);
117 mutex_unlock(&nv_subdev(priv
)->mutex
);
121 nve0_fifo_context_attach(struct nouveau_object
*parent
,
122 struct nouveau_object
*object
)
124 struct nouveau_bar
*bar
= nouveau_bar(parent
);
125 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
126 struct nouveau_engctx
*ectx
= (void *)object
;
130 switch (nv_engidx(object
->engine
)) {
131 case NVDEV_ENGINE_SW
:
132 case NVDEV_ENGINE_COPY0
:
133 case NVDEV_ENGINE_COPY1
:
134 case NVDEV_ENGINE_COPY2
:
136 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
137 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
138 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
139 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
144 if (!ectx
->vma
.node
) {
145 ret
= nouveau_gpuobj_map_vm(nv_gpuobj(ectx
), base
->vm
,
146 NV_MEM_ACCESS_RW
, &ectx
->vma
);
150 nv_engctx(ectx
)->addr
= nv_gpuobj(base
)->addr
>> 12;
153 nv_wo32(base
, addr
+ 0x00, lower_32_bits(ectx
->vma
.offset
) | 4);
154 nv_wo32(base
, addr
+ 0x04, upper_32_bits(ectx
->vma
.offset
));
160 nve0_fifo_context_detach(struct nouveau_object
*parent
, bool suspend
,
161 struct nouveau_object
*object
)
163 struct nouveau_bar
*bar
= nouveau_bar(parent
);
164 struct nve0_fifo_priv
*priv
= (void *)parent
->engine
;
165 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
166 struct nve0_fifo_chan
*chan
= (void *)parent
;
169 switch (nv_engidx(object
->engine
)) {
170 case NVDEV_ENGINE_SW
: return 0;
171 case NVDEV_ENGINE_COPY0
:
172 case NVDEV_ENGINE_COPY1
:
173 case NVDEV_ENGINE_COPY2
: addr
= 0x0000; break;
174 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
175 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
176 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
177 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
182 nv_wr32(priv
, 0x002634, chan
->base
.chid
);
183 if (!nv_wait(priv
, 0x002634, 0xffffffff, chan
->base
.chid
)) {
184 nv_error(priv
, "channel %d [%s] kick timeout\n",
185 chan
->base
.chid
, nouveau_client_name(chan
));
191 nv_wo32(base
, addr
+ 0x00, 0x00000000);
192 nv_wo32(base
, addr
+ 0x04, 0x00000000);
200 nve0_fifo_chan_ctor(struct nouveau_object
*parent
,
201 struct nouveau_object
*engine
,
202 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
203 struct nouveau_object
**pobject
)
205 struct nouveau_bar
*bar
= nouveau_bar(parent
);
206 struct nve0_fifo_priv
*priv
= (void *)engine
;
207 struct nve0_fifo_base
*base
= (void *)parent
;
208 struct nve0_fifo_chan
*chan
;
209 struct nve0_channel_ind_class
*args
= data
;
210 u64 usermem
, ioffset
, ilength
;
213 if (size
< sizeof(*args
))
216 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
217 if (args
->engine
& (1 << i
)) {
218 if (nouveau_engine(parent
, fifo_engine
[i
].subdev
)) {
219 args
->engine
= (1 << i
);
225 if (i
== FIFO_ENGINE_NR
) {
226 nv_error(priv
, "unsupported engines 0x%08x\n", args
->engine
);
230 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 1,
231 priv
->user
.bar
.offset
, 0x200,
233 fifo_engine
[i
].mask
, &chan
);
234 *pobject
= nv_object(chan
);
238 nv_parent(chan
)->context_attach
= nve0_fifo_context_attach
;
239 nv_parent(chan
)->context_detach
= nve0_fifo_context_detach
;
242 usermem
= chan
->base
.chid
* 0x200;
243 ioffset
= args
->ioffset
;
244 ilength
= order_base_2(args
->ilength
/ 8);
246 for (i
= 0; i
< 0x200; i
+= 4)
247 nv_wo32(priv
->user
.mem
, usermem
+ i
, 0x00000000);
249 nv_wo32(base
, 0x08, lower_32_bits(priv
->user
.mem
->addr
+ usermem
));
250 nv_wo32(base
, 0x0c, upper_32_bits(priv
->user
.mem
->addr
+ usermem
));
251 nv_wo32(base
, 0x10, 0x0000face);
252 nv_wo32(base
, 0x30, 0xfffff902);
253 nv_wo32(base
, 0x48, lower_32_bits(ioffset
));
254 nv_wo32(base
, 0x4c, upper_32_bits(ioffset
) | (ilength
<< 16));
255 nv_wo32(base
, 0x84, 0x20400000);
256 nv_wo32(base
, 0x94, 0x30000001);
257 nv_wo32(base
, 0x9c, 0x00000100);
258 nv_wo32(base
, 0xac, 0x0000001f);
259 nv_wo32(base
, 0xe8, chan
->base
.chid
);
260 nv_wo32(base
, 0xb8, 0xf8000000);
261 nv_wo32(base
, 0xf8, 0x10003080); /* 0x002310 */
262 nv_wo32(base
, 0xfc, 0x10000010); /* 0x002350 */
268 nve0_fifo_chan_init(struct nouveau_object
*object
)
270 struct nouveau_gpuobj
*base
= nv_gpuobj(object
->parent
);
271 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
272 struct nve0_fifo_chan
*chan
= (void *)object
;
273 u32 chid
= chan
->base
.chid
;
276 ret
= nouveau_fifo_channel_init(&chan
->base
);
280 nv_mask(priv
, 0x800004 + (chid
* 8), 0x000f0000, chan
->engine
<< 16);
281 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x80000000 | base
->addr
>> 12);
282 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
283 nve0_fifo_runlist_update(priv
, chan
->engine
);
284 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
289 nve0_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
291 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
292 struct nve0_fifo_chan
*chan
= (void *)object
;
293 u32 chid
= chan
->base
.chid
;
295 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000800, 0x00000800);
296 nve0_fifo_runlist_update(priv
, chan
->engine
);
297 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x00000000);
299 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
302 static struct nouveau_ofuncs
304 .ctor
= nve0_fifo_chan_ctor
,
305 .dtor
= _nouveau_fifo_channel_dtor
,
306 .init
= nve0_fifo_chan_init
,
307 .fini
= nve0_fifo_chan_fini
,
308 .rd32
= _nouveau_fifo_channel_rd32
,
309 .wr32
= _nouveau_fifo_channel_wr32
,
312 static struct nouveau_oclass
313 nve0_fifo_sclass
[] = {
314 { NVE0_CHANNEL_IND_CLASS
, &nve0_fifo_ofuncs
},
318 /*******************************************************************************
319 * FIFO context - instmem heap and vm setup
320 ******************************************************************************/
323 nve0_fifo_context_ctor(struct nouveau_object
*parent
,
324 struct nouveau_object
*engine
,
325 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
326 struct nouveau_object
**pobject
)
328 struct nve0_fifo_base
*base
;
331 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
332 0x1000, NVOBJ_FLAG_ZERO_ALLOC
, &base
);
333 *pobject
= nv_object(base
);
337 ret
= nouveau_gpuobj_new(nv_object(base
), NULL
, 0x10000, 0x1000, 0,
342 nv_wo32(base
, 0x0200, lower_32_bits(base
->pgd
->addr
));
343 nv_wo32(base
, 0x0204, upper_32_bits(base
->pgd
->addr
));
344 nv_wo32(base
, 0x0208, 0xffffffff);
345 nv_wo32(base
, 0x020c, 0x000000ff);
347 ret
= nouveau_vm_ref(nouveau_client(parent
)->vm
, &base
->vm
, base
->pgd
);
355 nve0_fifo_context_dtor(struct nouveau_object
*object
)
357 struct nve0_fifo_base
*base
= (void *)object
;
358 nouveau_vm_ref(NULL
, &base
->vm
, base
->pgd
);
359 nouveau_gpuobj_ref(NULL
, &base
->pgd
);
360 nouveau_fifo_context_destroy(&base
->base
);
363 static struct nouveau_oclass
365 .handle
= NV_ENGCTX(FIFO
, 0xe0),
366 .ofuncs
= &(struct nouveau_ofuncs
) {
367 .ctor
= nve0_fifo_context_ctor
,
368 .dtor
= nve0_fifo_context_dtor
,
369 .init
= _nouveau_fifo_context_init
,
370 .fini
= _nouveau_fifo_context_fini
,
371 .rd32
= _nouveau_fifo_context_rd32
,
372 .wr32
= _nouveau_fifo_context_wr32
,
376 /*******************************************************************************
378 ******************************************************************************/
380 static const struct nouveau_enum nve0_fifo_sched_reason
[] = {
381 { 0x0a, "CTXSW_TIMEOUT" },
385 static const struct nouveau_enum nve0_fifo_fault_engine
[] = {
386 { 0x00, "GR", NULL
, NVDEV_ENGINE_GR
},
388 { 0x04, "BAR1", NULL
, NVDEV_SUBDEV_BAR
},
389 { 0x05, "BAR3", NULL
, NVDEV_SUBDEV_INSTMEM
},
390 { 0x07, "PBDMA0", NULL
, NVDEV_ENGINE_FIFO
},
391 { 0x08, "PBDMA1", NULL
, NVDEV_ENGINE_FIFO
},
392 { 0x09, "PBDMA2", NULL
, NVDEV_ENGINE_FIFO
},
393 { 0x10, "MSVLD", NULL
, NVDEV_ENGINE_BSP
},
394 { 0x11, "MSPPP", NULL
, NVDEV_ENGINE_PPP
},
396 { 0x14, "MSPDEC", NULL
, NVDEV_ENGINE_VP
},
397 { 0x15, "CE0", NULL
, NVDEV_ENGINE_COPY0
},
398 { 0x16, "CE1", NULL
, NVDEV_ENGINE_COPY1
},
400 { 0x19, "MSENC", NULL
, NVDEV_ENGINE_VENC
},
401 { 0x1b, "CE2", NULL
, NVDEV_ENGINE_COPY2
},
405 static const struct nouveau_enum nve0_fifo_fault_reason
[] = {
407 { 0x01, "PDE_SIZE" },
409 { 0x03, "VA_LIMIT_VIOLATION" },
410 { 0x04, "UNBOUND_INST_BLOCK" },
411 { 0x05, "PRIV_VIOLATION" },
412 { 0x06, "RO_VIOLATION" },
413 { 0x07, "WO_VIOLATION" },
414 { 0x08, "PITCH_MASK_VIOLATION" },
415 { 0x09, "WORK_CREATION" },
416 { 0x0a, "UNSUPPORTED_APERTURE" },
417 { 0x0b, "COMPRESSION_FAILURE" },
418 { 0x0c, "UNSUPPORTED_KIND" },
419 { 0x0d, "REGION_VIOLATION" },
420 { 0x0e, "BOTH_PTES_VALID" },
421 { 0x0f, "INFO_TYPE_POISONED" },
425 static const struct nouveau_enum nve0_fifo_fault_hubclient
[] = {
433 { 0x07, "HOST_CPU" },
434 { 0x08, "HOST_CPU_NB" },
445 { 0x13, "RASTERTWOD" },
461 static const struct nouveau_enum nve0_fifo_fault_gpcclient
[] = {
462 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
463 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
464 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
465 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
473 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
474 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
475 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
476 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
478 { 0x20, "LTP_UTLB_0" },
479 { 0x21, "LTP_UTLB_1" },
480 { 0x22, "LTP_UTLB_2" },
481 { 0x23, "LTP_UTLB_3" },
482 { 0x24, "GPC_RGG_UTLB" },
486 static const struct nouveau_bitfield nve0_fifo_pbdma_intr
[] = {
487 { 0x00000001, "MEMREQ" },
488 { 0x00000002, "MEMACK_TIMEOUT" },
489 { 0x00000004, "MEMACK_EXTRA" },
490 { 0x00000008, "MEMDAT_TIMEOUT" },
491 { 0x00000010, "MEMDAT_EXTRA" },
492 { 0x00000020, "MEMFLUSH" },
493 { 0x00000040, "MEMOP" },
494 { 0x00000080, "LBCONNECT" },
495 { 0x00000100, "LBREQ" },
496 { 0x00000200, "LBACK_TIMEOUT" },
497 { 0x00000400, "LBACK_EXTRA" },
498 { 0x00000800, "LBDAT_TIMEOUT" },
499 { 0x00001000, "LBDAT_EXTRA" },
500 { 0x00002000, "GPFIFO" },
501 { 0x00004000, "GPPTR" },
502 { 0x00008000, "GPENTRY" },
503 { 0x00010000, "GPCRC" },
504 { 0x00020000, "PBPTR" },
505 { 0x00040000, "PBENTRY" },
506 { 0x00080000, "PBCRC" },
507 { 0x00100000, "XBARCONNECT" },
508 { 0x00200000, "METHOD" },
509 { 0x00400000, "METHODCRC" },
510 { 0x00800000, "DEVICE" },
511 { 0x02000000, "SEMAPHORE" },
512 { 0x04000000, "ACQUIRE" },
513 { 0x08000000, "PRI" },
514 { 0x20000000, "NO_CTXSW_SEG" },
515 { 0x40000000, "PBSEG" },
516 { 0x80000000, "SIGNATURE" },
521 nve0_fifo_intr_sched(struct nve0_fifo_priv
*priv
)
523 u32 intr
= nv_rd32(priv
, 0x00254c);
524 u32 code
= intr
& 0x000000ff;
525 nv_error(priv
, "SCHED_ERROR [");
526 nouveau_enum_print(nve0_fifo_sched_reason
, code
);
531 nve0_fifo_intr_chsw(struct nve0_fifo_priv
*priv
)
533 u32 stat
= nv_rd32(priv
, 0x00256c);
534 nv_error(priv
, "CHSW_ERROR 0x%08x\n", stat
);
535 nv_wr32(priv
, 0x00256c, stat
);
539 nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv
*priv
)
541 u32 stat
= nv_rd32(priv
, 0x00259c);
542 nv_error(priv
, "DROPPED_MMU_FAULT 0x%08x\n", stat
);
546 nve0_fifo_intr_fault(struct nve0_fifo_priv
*priv
, int unit
)
548 u32 inst
= nv_rd32(priv
, 0x2800 + (unit
* 0x10));
549 u32 valo
= nv_rd32(priv
, 0x2804 + (unit
* 0x10));
550 u32 vahi
= nv_rd32(priv
, 0x2808 + (unit
* 0x10));
551 u32 stat
= nv_rd32(priv
, 0x280c + (unit
* 0x10));
552 u32 client
= (stat
& 0x00001f00) >> 8;
553 struct nouveau_engine
*engine
= NULL
;
554 struct nouveau_object
*engctx
= NULL
;
555 const struct nouveau_enum
*en
;
556 const char *name
= "unknown";
558 nv_error(priv
, "PFIFO: %s fault at 0x%010llx [", (stat
& 0x00000080) ?
559 "write" : "read", (u64
)vahi
<< 32 | valo
);
560 nouveau_enum_print(nve0_fifo_fault_reason
, stat
& 0x0000000f);
562 en
= nouveau_enum_print(nve0_fifo_fault_engine
, unit
);
563 if (stat
& 0x00000040) {
565 nouveau_enum_print(nve0_fifo_fault_hubclient
, client
);
567 pr_cont("/GPC%d/", (stat
& 0x1f000000) >> 24);
568 nouveau_enum_print(nve0_fifo_fault_gpcclient
, client
);
571 if (en
&& en
->data2
) {
572 if (en
->data2
== NVDEV_SUBDEV_BAR
) {
573 nv_mask(priv
, 0x001704, 0x00000000, 0x00000000);
576 if (en
->data2
== NVDEV_SUBDEV_INSTMEM
) {
577 nv_mask(priv
, 0x001714, 0x00000000, 0x00000000);
580 engine
= nouveau_engine(priv
, en
->data2
);
582 engctx
= nouveau_engctx_get(engine
, inst
);
583 name
= nouveau_client_name(engctx
);
587 pr_cont(" on channel 0x%010llx [%s]\n", (u64
)inst
<< 12, name
);
589 nouveau_engctx_put(engctx
);
593 nve0_fifo_swmthd(struct nve0_fifo_priv
*priv
, u32 chid
, u32 mthd
, u32 data
)
595 struct nve0_fifo_chan
*chan
= NULL
;
596 struct nouveau_handle
*bind
;
600 spin_lock_irqsave(&priv
->base
.lock
, flags
);
601 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
602 chan
= (void *)priv
->base
.channel
[chid
];
606 bind
= nouveau_namedb_get_class(nv_namedb(chan
), 0x906e);
608 if (!mthd
|| !nv_call(bind
->object
, mthd
, data
))
610 nouveau_namedb_put(bind
);
614 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
619 nve0_fifo_intr_pbdma(struct nve0_fifo_priv
*priv
, int unit
)
621 u32 stat
= nv_rd32(priv
, 0x040108 + (unit
* 0x2000));
622 u32 addr
= nv_rd32(priv
, 0x0400c0 + (unit
* 0x2000));
623 u32 data
= nv_rd32(priv
, 0x0400c4 + (unit
* 0x2000));
624 u32 chid
= nv_rd32(priv
, 0x040120 + (unit
* 0x2000)) & 0xfff;
625 u32 subc
= (addr
& 0x00070000) >> 16;
626 u32 mthd
= (addr
& 0x00003ffc);
629 if (stat
& 0x00800000) {
630 if (!nve0_fifo_swmthd(priv
, chid
, mthd
, data
))
635 nv_error(priv
, "PBDMA%d:", unit
);
636 nouveau_bitfield_print(nve0_fifo_pbdma_intr
, show
);
639 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
641 nouveau_client_name_for_fifo_chid(&priv
->base
, chid
),
645 nv_wr32(priv
, 0x0400c0 + (unit
* 0x2000), 0x80600008);
646 nv_wr32(priv
, 0x040108 + (unit
* 0x2000), stat
);
650 nve0_fifo_intr(struct nouveau_subdev
*subdev
)
652 struct nve0_fifo_priv
*priv
= (void *)subdev
;
653 u32 mask
= nv_rd32(priv
, 0x002140);
654 u32 stat
= nv_rd32(priv
, 0x002100) & mask
;
656 if (stat
& 0x00000001) {
657 u32 stat
= nv_rd32(priv
, 0x00252c);
658 nv_error(priv
, "BIND_ERROR 0x%08x\n", stat
);
659 nv_wr32(priv
, 0x002100, 0x00000001);
663 if (stat
& 0x00000010) {
664 nv_error(priv
, "PIO_ERROR\n");
665 nv_wr32(priv
, 0x002100, 0x00000010);
669 if (stat
& 0x00000100) {
670 nve0_fifo_intr_sched(priv
);
671 nv_wr32(priv
, 0x002100, 0x00000100);
675 if (stat
& 0x00010000) {
676 nve0_fifo_intr_chsw(priv
);
677 nv_wr32(priv
, 0x002100, 0x00010000);
681 if (stat
& 0x00800000) {
682 nv_error(priv
, "FB_FLUSH_TIMEOUT\n");
683 nv_wr32(priv
, 0x002100, 0x00800000);
687 if (stat
& 0x01000000) {
688 nv_error(priv
, "LB_ERROR\n");
689 nv_wr32(priv
, 0x002100, 0x01000000);
693 if (stat
& 0x08000000) {
694 nve0_fifo_intr_dropped_fault(priv
);
695 nv_wr32(priv
, 0x002100, 0x08000000);
699 if (stat
& 0x10000000) {
700 u32 units
= nv_rd32(priv
, 0x00259c);
705 nve0_fifo_intr_fault(priv
, i
);
709 nv_wr32(priv
, 0x00259c, units
);
713 if (stat
& 0x20000000) {
714 u32 mask
= nv_rd32(priv
, 0x0025a0);
718 u32 unit
= ffs(temp
) - 1;
719 nve0_fifo_intr_pbdma(priv
, unit
);
720 temp
&= ~(1 << unit
);
723 nv_wr32(priv
, 0x0025a0, mask
);
727 if (stat
& 0x40000000) {
728 u32 mask
= nv_mask(priv
, 0x002a00, 0x00000000, 0x00000000);
731 u32 engn
= ffs(mask
) - 1;
732 /* runlist event, not currently used */
733 mask
&= ~(1 << engn
);
739 if (stat
& 0x80000000) {
740 nouveau_event_trigger(priv
->base
.uevent
, 0);
741 nv_wr32(priv
, 0x002100, 0x80000000);
746 nv_fatal(priv
, "unhandled status 0x%08x\n", stat
);
747 nv_wr32(priv
, 0x002100, stat
);
748 nv_wr32(priv
, 0x002140, 0);
753 nve0_fifo_uevent_enable(struct nouveau_event
*event
, int index
)
755 struct nve0_fifo_priv
*priv
= event
->priv
;
756 nv_mask(priv
, 0x002140, 0x80000000, 0x80000000);
760 nve0_fifo_uevent_disable(struct nouveau_event
*event
, int index
)
762 struct nve0_fifo_priv
*priv
= event
->priv
;
763 nv_mask(priv
, 0x002140, 0x80000000, 0x00000000);
767 nve0_fifo_fini(struct nouveau_object
*object
, bool suspend
)
769 struct nve0_fifo_priv
*priv
= (void *)object
;
772 ret
= nouveau_fifo_fini(&priv
->base
, suspend
);
776 /* allow mmu fault interrupts, even when we're not using fifo */
777 nv_mask(priv
, 0x002140, 0x10000000, 0x10000000);
782 nve0_fifo_init(struct nouveau_object
*object
)
784 struct nve0_fifo_priv
*priv
= (void *)object
;
787 ret
= nouveau_fifo_init(&priv
->base
);
791 /* enable all available PBDMA units */
792 nv_wr32(priv
, 0x000204, 0xffffffff);
793 priv
->spoon_nr
= hweight32(nv_rd32(priv
, 0x000204));
794 nv_debug(priv
, "%d PBDMA unit(s)\n", priv
->spoon_nr
);
797 for (i
= 0; i
< priv
->spoon_nr
; i
++) {
798 nv_mask(priv
, 0x04013c + (i
* 0x2000), 0x10000100, 0x00000000);
799 nv_wr32(priv
, 0x040108 + (i
* 0x2000), 0xffffffff); /* INTR */
800 nv_wr32(priv
, 0x04010c + (i
* 0x2000), 0xfffffeff); /* INTREN */
803 nv_wr32(priv
, 0x002254, 0x10000000 | priv
->user
.bar
.offset
>> 12);
805 nv_wr32(priv
, 0x002a00, 0xffffffff);
806 nv_wr32(priv
, 0x002100, 0xffffffff);
807 nv_wr32(priv
, 0x002140, 0x3fffffff);
812 nve0_fifo_dtor(struct nouveau_object
*object
)
814 struct nve0_fifo_priv
*priv
= (void *)object
;
817 nouveau_gpuobj_unmap(&priv
->user
.bar
);
818 nouveau_gpuobj_ref(NULL
, &priv
->user
.mem
);
820 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
821 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].runlist
[1]);
822 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].runlist
[0]);
825 nouveau_fifo_destroy(&priv
->base
);
829 nve0_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
830 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
831 struct nouveau_object
**pobject
)
833 struct nve0_fifo_impl
*impl
= (void *)oclass
;
834 struct nve0_fifo_priv
*priv
;
837 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0,
838 impl
->channels
- 1, &priv
);
839 *pobject
= nv_object(priv
);
843 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
844 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
845 0, &priv
->engine
[i
].runlist
[0]);
849 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
850 0, &priv
->engine
[i
].runlist
[1]);
855 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 4096 * 0x200, 0x1000,
856 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->user
.mem
);
860 ret
= nouveau_gpuobj_map(priv
->user
.mem
, NV_MEM_ACCESS_RW
,
865 priv
->base
.uevent
->enable
= nve0_fifo_uevent_enable
;
866 priv
->base
.uevent
->disable
= nve0_fifo_uevent_disable
;
867 priv
->base
.uevent
->priv
= priv
;
869 nv_subdev(priv
)->unit
= 0x00000100;
870 nv_subdev(priv
)->intr
= nve0_fifo_intr
;
871 nv_engine(priv
)->cclass
= &nve0_fifo_cclass
;
872 nv_engine(priv
)->sclass
= nve0_fifo_sclass
;
876 struct nouveau_oclass
*
877 nve0_fifo_oclass
= &(struct nve0_fifo_impl
) {
878 .base
.handle
= NV_ENGINE(FIFO
, 0xe0),
879 .base
.ofuncs
= &(struct nouveau_ofuncs
) {
880 .ctor
= nve0_fifo_ctor
,
881 .dtor
= nve0_fifo_dtor
,
882 .init
= nve0_fifo_init
,
883 .fini
= nve0_fifo_fini
,