/*
 * Copyright 2007 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <engine/gr.h>
#include "regs.h"

#include <core/client.h>
#include <engine/fifo.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

static u32
nv04_gr_ctx_regs[] = {
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	0x00400174,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV04_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV04_PGRAPH_SURFACE,
	NV04_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV04_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x0c,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x1c,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x2c,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x3c,
	NV04_PGRAPH_PATT_COLORRAM+0x40,
	NV04_PGRAPH_PATT_COLORRAM+0x44,
	NV04_PGRAPH_PATT_COLORRAM+0x48,
	NV04_PGRAPH_PATT_COLORRAM+0x4c,
	NV04_PGRAPH_PATT_COLORRAM+0x50,
	NV04_PGRAPH_PATT_COLORRAM+0x54,
	NV04_PGRAPH_PATT_COLORRAM+0x58,
	NV04_PGRAPH_PATT_COLORRAM+0x5c,
	NV04_PGRAPH_PATT_COLORRAM+0x60,
	NV04_PGRAPH_PATT_COLORRAM+0x64,
	NV04_PGRAPH_PATT_COLORRAM+0x68,
	NV04_PGRAPH_PATT_COLORRAM+0x6c,
	NV04_PGRAPH_PATT_COLORRAM+0x70,
	NV04_PGRAPH_PATT_COLORRAM+0x74,
	NV04_PGRAPH_PATT_COLORRAM+0x78,
	NV04_PGRAPH_PATT_COLORRAM+0x7c,
	NV04_PGRAPH_PATT_COLORRAM+0x80,
	NV04_PGRAPH_PATT_COLORRAM+0x84,
	NV04_PGRAPH_PATT_COLORRAM+0x88,
	NV04_PGRAPH_PATT_COLORRAM+0x8c,
	NV04_PGRAPH_PATT_COLORRAM+0x90,
	NV04_PGRAPH_PATT_COLORRAM+0x94,
	NV04_PGRAPH_PATT_COLORRAM+0x98,
	NV04_PGRAPH_PATT_COLORRAM+0x9c,
	NV04_PGRAPH_PATT_COLORRAM+0xa0,
	NV04_PGRAPH_PATT_COLORRAM+0xa4,
	NV04_PGRAPH_PATT_COLORRAM+0xa8,
	NV04_PGRAPH_PATT_COLORRAM+0xac,
	NV04_PGRAPH_PATT_COLORRAM+0xb0,
	NV04_PGRAPH_PATT_COLORRAM+0xb4,
	NV04_PGRAPH_PATT_COLORRAM+0xb8,
	NV04_PGRAPH_PATT_COLORRAM+0xbc,
	NV04_PGRAPH_PATT_COLORRAM+0xc0,
	NV04_PGRAPH_PATT_COLORRAM+0xc4,
	NV04_PGRAPH_PATT_COLORRAM+0xc8,
	NV04_PGRAPH_PATT_COLORRAM+0xcc,
	NV04_PGRAPH_PATT_COLORRAM+0xd0,
	NV04_PGRAPH_PATT_COLORRAM+0xd4,
	NV04_PGRAPH_PATT_COLORRAM+0xd8,
	NV04_PGRAPH_PATT_COLORRAM+0xdc,
	NV04_PGRAPH_PATT_COLORRAM+0xe0,
	NV04_PGRAPH_PATT_COLORRAM+0xe4,
	NV04_PGRAPH_PATT_COLORRAM+0xe8,
	NV04_PGRAPH_PATT_COLORRAM+0xec,
	NV04_PGRAPH_PATT_COLORRAM+0xf0,
	NV04_PGRAPH_PATT_COLORRAM+0xf4,
	NV04_PGRAPH_PATT_COLORRAM+0xf8,
	NV04_PGRAPH_PATT_COLORRAM+0xfc,
	NV04_PGRAPH_PATTERN,
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	0x00400600,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_BLEND,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400400,
	0x00400480,
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	0x00400534,
	0x00400538,
	0x00400514,
	0x00400518,
	0x0040051c,
	0x00400520,
	0x00400524,
	0x00400528,
	0x0040052c,
	0x00400530,
	0x00400d00,
	0x00400d40,
	0x00400d80,
	0x00400d04,
	0x00400d44,
	0x00400d84,
	0x00400d08,
	0x00400d48,
	0x00400d88,
	0x00400d0c,
	0x00400d4c,
	0x00400d8c,
	0x00400d10,
	0x00400d50,
	0x00400d90,
	0x00400d14,
	0x00400d54,
	0x00400d94,
	0x00400d18,
	0x00400d58,
	0x00400d98,
	0x00400d1c,
	0x00400d5c,
	0x00400d9c,
	0x00400d20,
	0x00400d60,
	0x00400da0,
	0x00400d24,
	0x00400d64,
	0x00400da4,
	0x00400d28,
	0x00400d68,
	0x00400da8,
	0x00400d2c,
	0x00400d6c,
	0x00400dac,
	0x00400d30,
	0x00400d70,
	0x00400db0,
	0x00400d34,
	0x00400d74,
	0x00400db4,
	0x00400d38,
	0x00400d78,
	0x00400db8,
	0x00400d3c,
	0x00400d7c,
	0x00400dbc,
	0x00400590,
	0x00400594,
	0x00400598,
	0x0040059c,
	0x004005a8,
	0x004005ac,
	0x004005b0,
	0x004005b4,
	0x004005c0,
	0x004005c4,
	0x004005c8,
	0x004005cc,
	0x004005d0,
	0x004005d4,
	0x004005d8,
	0x004005dc,
	0x004005e0,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	0x00400500,
	0x00400504,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
	NV04_PGRAPH_DEBUG_3
};
struct nv04_gr {
	struct nvkm_gr base;
	struct nv04_gr_chan *chan[16];
	spinlock_t lock;
};

struct nv04_gr_chan {
	struct nvkm_object base;
	int chid;
	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};

static inline struct nv04_gr *
nv04_gr(struct nv04_gr_chan *chan)
{
	return (void *)nv_object(chan)->engine;
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

/*
 * Software methods, why they are needed, and how they all work:
 *
 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
 * 3 words long on both. grobj format on NV04 is:
 *
 * word 0:
 *  - bits 0-7: class
 *  - bit 12: color key active
 *  - bit 13: clip rect active
 *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *            NV03_CONTEXT_SURFACE_DST].
 *  - bits 15-17: 2d operation [aka patch config]
 *  - bit 24: patch valid [enables rendering using this object]
 *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
 * word 1:
 *  - bits 0-1: mono format
 *  - bits 8-13: color format
 *  - bits 16-31: DMA_NOTIFY instance
 * word 2:
 *  - bits 0-15: DMA_A instance
 *  - bits 16-31: DMA_B instance
 *
 * On NV05 it's:
 *
 * word 0:
 *  - bits 0-7: class
 *  - bit 12: color key active
 *  - bit 13: clip rect active
 *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *            NV03_CONTEXT_SURFACE_DST].
 *  - bits 15-17: 2d operation [aka patch config]
 *  - bits 20-22: dither mode
 *  - bit 24: patch valid [enables rendering using this object]
 *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
 *  - bit 26: surface_src/surface_zeta valid
 *  - bit 27: pattern valid
 *  - bit 28: rop valid
 *  - bit 29: beta1 valid
 *  - bit 30: beta4 valid
 * word 1:
 *  - bits 0-1: mono format
 *  - bits 8-13: color format
 *  - bits 16-31: DMA_NOTIFY instance
 * word 2:
 *  - bits 0-15: DMA_A instance
 *  - bits 16-31: DMA_B instance
 *
 * NV05 will set/unset the relevant valid bits when you poke the relevant
 * object-binding methods with object of the proper type, or with the NULL
 * type. It'll only allow rendering using the grobj if all needed objects
 * are bound. The needed set of objects depends on selected operation: for
 * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
 *
 * NV04 doesn't have these methods implemented at all, and doesn't have the
 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
 * is set. So we have to emulate them in software, internally keeping the
 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
 * but the last word isn't actually used for anything, we abuse it for this
 * purpose.
 *
 * Actually, NV05 can optionally check bit 24 too, but we disable this since
 * there's no use for it.
 *
 * For unknown reasons, NV04 implements surf3d binding in hardware as an
 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
 * methods on the surf3d object, so we have to emulate them too.
 */
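
/*
 * Worked example (editor's illustration, not part of the original code):
 * an NV04 grobj whose first word reads 0x0101805f decodes, per the layout
 * above, as class 0x5f (blit), 2d operation (0x0101805f >> 15) & 7 = 3,
 * and bit 24 set, i.e. the patch is valid and may be used for rendering.
 */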

static void
nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp = nvkm_rd32(device, 0x700000 + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x700000 + inst, tmp);

	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
}

static void
nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int class, op, valid = 1;
	u32 tmp, ctx1;

	ctx1 = nvkm_rd32(device, 0x700000 + inst);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;

	tmp = nvkm_rd32(device, 0x70000c + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x70000c + inst, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
}
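
/*
 * Validation example (editor's illustration): an NV01-style blit (class
 * 0x1f) with op 1 (ROP_AND) needs bit 25 (surf_dst), bit 26 (surf_src)
 * and at least one of bits 27/28 (pattern/rop) set in the shadow word
 * before the patch-valid bit is written back into ctx1; binding a NULL
 * object (class 0x30) clears the corresponding bit and invalidates the
 * patch again.
 */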

static bool
nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
{
	u8 class = nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
	if (data > 5)
		return false;
	/* Old versions of the objects only accept first three operations. */
	if (data > 2 && class < 0x40)
		return false;
	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
	/* changing operation changes set of objects needed for validation */
	nv04_gr_set_ctx_val(device, inst, 0, 0);
	return true;
}
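
/*
 * Example (editor's illustration): SET_OPERATION with data 4 is rejected
 * on an NV01-style object such as class 0x1f, since classes below 0x40
 * only accept operations 0-2, but is accepted on its NV04 counterpart
 * 0x5f, where it also forces a revalidation via nv04_gr_set_ctx_val().
 */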

static bool
nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x40053c, min);
	nvkm_wr32(device, 0x400544, max);
	return true;
}

static bool
nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x400540, min);
	nvkm_wr32(device, 0x400548, max);
	return true;
}
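
/*
 * Worked example of the clip encoding handled above (editor's
 * illustration): data 0x00200010 gives min = 0x10 and width 0x20, so
 * max = 0x30; data 0xffff0040 has a negative width (0xffff sign-extends
 * to -1), giving max = 0x40 - 1 = 0x3f.
 */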

static u8
nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
{
	return nvkm_rd32(device, 0x700000 + (inst << 4));
}
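
/*
 * Note (editor's): the bind methods below pass their method data here,
 * which is interpreted as the bound object's instance offset in 16-byte
 * units; the << 4 turns it into a byte offset into the 0x700000 instance
 * memory window, and the low byte of the object's first word is the
 * class being bound (0x30 meaning NULL, i.e. unbind).
 */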

static bool
nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	case 0x52:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x18:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x44:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
		return true;
	case 0x43:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
		return true;
	case 0x12:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
		return true;
	case 0x72:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x58:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x59:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x5a:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x5b:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
		return true;
	case 0x19:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
		return true;
	/* Yes, for some reason even the old versions of objects
	 * accept 0x57 and not 0x17. Consistency be damned.
	 */
	case 0x57:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
		return true;
	}
	return false;
}

static bool
nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32, u32);
	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
	case 0x1c ... 0x1e:
		   func = nv01_gr_mthd_prim; break;
	case 0x1f: func = nv01_gr_mthd_blit; break;
	case 0x21: func = nv01_gr_mthd_ifc; break;
	case 0x36: func = nv03_gr_mthd_sifc; break;
	case 0x37: func = nv03_gr_mthd_sifm; break;
	case 0x48: func = nv03_gr_mthd_ttri; break;
	case 0x4a: func = nv04_gr_mthd_gdi; break;
	case 0x4b: func = nv03_gr_mthd_gdi; break;
	case 0x53: func = nv04_gr_mthd_surf3d; break;
	case 0x5c ... 0x5e:
		   func = nv04_gr_mthd_prim; break;
	case 0x5f: func = nv04_gr_mthd_blit; break;
	case 0x60: func = nv04_gr_mthd_iifc; break;
	case 0x61: func = nv04_gr_mthd_ifc; break;
	case 0x76: func = nv04_gr_mthd_sifc; break;
	case 0x77: func = nv04_gr_mthd_sifm; break;
	default:
		return false;
	}
	return func(device, inst, mthd, data);
}
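
/*
 * Dispatch example (editor's illustration): a trapped method on a class
 * 0x5f (blit) object with mthd 0x02fc routes through nv04_gr_mthd_blit()
 * to nv04_gr_mthd_set_operation(); an unknown class or method falls out
 * of the switches and returns false, so the interrupt handler can still
 * report it as a genuine error.
 */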

static int
nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nvkm_kmap(obj);
	nvkm_wo32(obj, 0x00, nv_mclass(obj));
#ifdef __BIG_ENDIAN
	nvkm_mo32(obj, 0x00, 0x00080000, 0x00080000);
#endif
	nvkm_wo32(obj, 0x04, 0x00000000);
	nvkm_wo32(obj, 0x08, 0x00000000);
	nvkm_wo32(obj, 0x0c, 0x00000000);
	nvkm_done(obj);
	return 0;
}
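
/*
 * Resulting grobj layout (editor's illustration): a 16-byte, 16-byte
 * aligned object whose first word holds the class (plus the big-endian
 * flag in bit 19 where applicable) and whose remaining three words start
 * out zero; the software methods above then maintain ctx1 flags in word
 * 0 and the NV05-style valid bits in word 3 (offset 0x0c).
 */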

struct nvkm_ofuncs
nv04_gr_ofuncs = {
	.ctor = nv04_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_oclass
nv04_gr_sclass[] = {
	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
	{ 0x0017, &nv04_gr_ofuncs }, /* chroma */
	{ 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
	{ 0x001c, &nv04_gr_ofuncs }, /* line */
	{ 0x001d, &nv04_gr_ofuncs }, /* tri */
	{ 0x001e, &nv04_gr_ofuncs }, /* rect */
	{ 0x001f, &nv04_gr_ofuncs },
	{ 0x0021, &nv04_gr_ofuncs },
	{ 0x0030, &nv04_gr_ofuncs }, /* null */
	{ 0x0036, &nv04_gr_ofuncs },
	{ 0x0037, &nv04_gr_ofuncs },
	{ 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
	{ 0x0042, &nv04_gr_ofuncs }, /* surf2d */
	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
	{ 0x0048, &nv04_gr_ofuncs },
	{ 0x004a, &nv04_gr_ofuncs },
	{ 0x004b, &nv04_gr_ofuncs },
	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
	{ 0x0053, &nv04_gr_ofuncs },
	{ 0x0054, &nv04_gr_ofuncs }, /* ttri */
	{ 0x0055, &nv04_gr_ofuncs }, /* mtri */
	{ 0x0057, &nv04_gr_ofuncs }, /* chroma */
	{ 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
	{ 0x0059, &nv04_gr_ofuncs }, /* surf_src */
	{ 0x005a, &nv04_gr_ofuncs }, /* surf_color */
	{ 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
	{ 0x005c, &nv04_gr_ofuncs }, /* line */
	{ 0x005d, &nv04_gr_ofuncs }, /* tri */
	{ 0x005e, &nv04_gr_ofuncs }, /* rect */
	{ 0x005f, &nv04_gr_ofuncs },
	{ 0x0060, &nv04_gr_ofuncs },
	{ 0x0061, &nv04_gr_ofuncs },
	{ 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
	{ 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
	{ 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
	{ 0x0076, &nv04_gr_ofuncs },
	{ 0x0077, &nv04_gr_ofuncs },
	{},
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static struct nv04_gr_chan *
nv04_gr_channel(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *chan = NULL;
	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
		if (chid < ARRAY_SIZE(gr->chan))
			chan = gr->chan[chid];
	}
	return chan;
}

static int
nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
	struct nv04_gr *gr = nv04_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
	return 0;
}

static int
nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
	struct nv04_gr *gr = nv04_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
	return 0;
}

static void
nv04_gr_context_switch(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *prev = NULL;
	struct nv04_gr_chan *next = NULL;
	int chid;

	nv04_gr_idle(gr);

	/* If previous context is valid, we need to save it */
	prev = nv04_gr_channel(gr);
	if (prev)
		nv04_gr_unload_context(prev);

	/* load context for next channel */
	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
	next = gr->chan[chid];
	if (next)
		nv04_gr_load_context(next, chid);
}
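
/*
 * Switch flow (editor's note): after nv04_gr_idle() drains PGRAPH, the
 * outgoing channel's registers are read back into its chan->nv04[]
 * shadow by nv04_gr_unload_context(); the incoming channel id is then
 * taken from bits 24-27 of NV04_PGRAPH_TRAPPED_ADDR and its shadow is
 * written back out by nv04_gr_load_context().
 */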

static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) {
		if (nv04_gr_ctx_regs[i] == reg)
			return &chan->nv04[i];
	}

	return NULL;
}
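
/*
 * Usage sketch (editor's illustration): ctx_reg() returns a pointer into
 * the channel's shadow copy rather than touching the hardware, so state
 * can be preloaded before the context is ever resident, e.g. as the
 * context constructor below does:
 *
 *	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
 */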

static int
nv04_gr_context_ctor(struct nvkm_object *parent,
		     struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nvkm_fifo_chan *fifo = (void *)parent;
	struct nv04_gr *gr = (void *)engine;
	struct nv04_gr_chan *chan;
	unsigned long flags;
	int ret;

	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&gr->lock, flags);
	if (gr->chan[fifo->chid]) {
		*pobject = nv_object(gr->chan[fifo->chid]);
		atomic_inc(&(*pobject)->refcount);
		spin_unlock_irqrestore(&gr->lock, flags);
		nvkm_object_destroy(&chan->base);
		return 1;
	}

	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;

	gr->chan[fifo->chid] = chan;
	chan->chid = fifo->chid;
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

static void
nv04_gr_context_dtor(struct nvkm_object *object)
{
	struct nv04_gr *gr = (void *)object->engine;
	struct nv04_gr_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&gr->lock, flags);

	nvkm_object_destroy(&chan->base);
}

static int
nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_gr *gr = (void *)object->engine;
	struct nv04_gr_chan *chan = (void *)object;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv04_gr_channel(gr) == chan)
		nv04_gr_unload_context(chan);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);

	return _nvkm_object_fini(&chan->base, suspend);
}

static struct nvkm_oclass
nv04_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_gr_context_ctor,
		.dtor = nv04_gr_context_dtor,
		.init = _nvkm_object_init,
		.fini = nv04_gr_context_fini,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

bool
nv04_gr_idle(void *obj)
{
	struct nvkm_gr *gr = nvkm_gr(obj);
	struct nvkm_subdev *subdev = &gr->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = 0xffffffff;

	if (nv_device(obj)->card_type == NV_40)
		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
			break;
	) < 0) {
		nvkm_error(subdev, "idle timed out with status %08x\n",
			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}
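
/*
 * Usage sketch (editor's illustration): callers treat nv04_gr_idle() as
 * a boolean drain of PGRAPH, e.g.
 *
 *	if (!nv04_gr_idle(gr))
 *		return -EBUSY;
 *
 * where the -EBUSY mapping is the caller's choice; on failure the
 * function has already logged the stuck NV04_PGRAPH_STATUS value.
 */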

static const struct nvkm_bitfield
nv04_gr_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{}
};

static const struct nvkm_bitfield
nv04_gr_nstatus[] = {
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
	{}
};

const struct nvkm_bitfield
nv04_gr_nsource[] = {
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
	{}
};

static void
nv04_gr_intr(struct nvkm_subdev *subdev)
{
	struct nv04_gr *gr = (void *)subdev;
	struct nv04_gr_chan *chan = NULL;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x0f000000) >> 24;
	u32 subc = (addr & 0x0000e000) >> 13;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	chan = gr->chan[chid];

	if (stat & NV_PGRAPH_INTR_NOTIFY) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			/* only report methods the software path failed to handle */
			if (nv04_gr_mthd(device, inst, mthd, data))
				show &= ~NV_PGRAPH_INTR_NOTIFY;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv04_gr_context_switch(gr);
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   nvkm_client_name(chan), subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->lock, flags);
}
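
/*
 * Decode example (editor's illustration): a trapped address of
 * 0x0300a2fc splits, per the masks above, into chid 3, subc 5 and
 * mthd 0x02fc; combined with the class latched in 0x400180 + subc * 4
 * and the instance in 0x40016c, this is everything nv04_gr_mthd()
 * needs to emulate the method in software.
 */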

static int
nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv04_gr *gr;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
	*pobject = nv_object(gr);
	if (ret)
		return ret;

	nv_subdev(gr)->unit = 0x00001000;
	nv_subdev(gr)->intr = nv04_gr_intr;
	nv_engine(gr)->cclass = &nv04_gr_cclass;
	nv_engine(gr)->sclass = nv04_gr_sclass;
	spin_lock_init(&gr->lock);
	return 0;
}

static int
nv04_gr_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv04_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* Enable PGRAPH interrupts */
	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob, 01 haiku*/
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
	/*haiku same*/

	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nvkm_wr32(device, NV04_PGRAPH_STATE, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);

	/* These don't belong here, they're part of a per-channel context */
	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
	return 0;
}

struct nvkm_oclass
nv04_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv04_gr_init,
		.fini = _nvkm_gr_fini,
	},
};