drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
#include <core/engctx.h>

#include <subdev/fb.h>
#include <subdev/timer.h>

#include <engine/graph.h>
#include <engine/fifo.h>

#include "nv40.h"
#include "regs.h"

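/*
 * Per-engine and per-channel state.  priv->size caches the size of the
 * context image computed by nv40_grctx_init() during engine init, and is
 * used below to size each channel's context object.
 */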
struct nv40_graph_priv {
	struct nouveau_graph base;
	u32 size;
};

struct nv40_graph_chan {
	struct nouveau_graph_chan base;
};

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

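/*
 * The object constructor allocates a small gpuobj and writes the hardware
 * object header: the class id at offset 0x00 and, on big-endian builds,
 * bit 24 of the word at offset 0x08 (presumably an endianness flag); the
 * remaining words are zeroed.
 */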
static int
nv40_graph_object_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *obj;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    20, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
#endif
	nv_wo32(obj, 0x0c, 0x00000000);
	nv_wo32(obj, 0x10, 0x00000000);
	return 0;
}

static struct nouveau_ofuncs
nv40_graph_ofuncs = {
	.ctor = nv40_graph_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};

static struct nouveau_oclass
nv40_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};

static struct nouveau_oclass
nv44_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

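/*
 * Each channel's context object holds a full PGRAPH context image.  The
 * constructor allocates it using the size computed by nv40_grctx_init(),
 * fills in default values via nv40_grctx_fill(), and stores the context's
 * own instance address (in 16-byte units) at offset 0x00.
 */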
static int
nv40_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv40_graph_priv *priv = (void *)engine;
	struct nv40_graph_chan *chan;
	int ret;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 16,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
	return 0;
}

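/*
 * Context teardown.  If this channel is the one currently resident in
 * PGRAPH (0x40032c), a context save is forced before the object goes away;
 * the register writes below appear to kick the ctxprog, with bit 0 of
 * 0x400300 clearing on completion, and a timeout is reported if it never
 * does.
 */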
static int
nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_graph_priv *priv = (void *)object->engine;
	struct nv04_graph_chan *chan = (void *)object;
	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
	int ret = 0;

	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);

	if (nv_rd32(priv, 0x40032c) == inst) {
		if (suspend) {
			nv_wr32(priv, 0x400720, 0x00000000);
			nv_wr32(priv, 0x400784, inst);
			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
				u32 insn = nv_rd32(priv, 0x400308);
				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
				ret = -EBUSY;
			}
		}

		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nv_rd32(priv, 0x400330) == inst)
		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);

	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
	return ret;
}

static struct nouveau_oclass
nv40_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = nv40_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

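/*
 * Mirror one of PFB's tiling regions into PGRAPH.  The FIFO is paused and
 * the engine idled first, since the tile registers presumably cannot be
 * reprogrammed safely while PGRAPH is busy; which register block is
 * written depends on the chipset.
 */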
static void
nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
{
	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
	struct nv40_graph_priv *priv = (void *)engine;
	unsigned long flags;

	pfifo->pause(pfifo, &flags);
	nv04_graph_idle(priv);

	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x41: /* guess */
	case 0x42:
	case 0x43:
	case 0x45: /* guess */
	case 0x4e:
		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	case 0x44:
	case 0x4a:
		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
	case 0x67:
	default:
		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}

	pfifo->start(pfifo, &flags);
}

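/*
 * Interrupt handler.  Reads back the trapped channel instance, subchannel,
 * class, method and data, gives software method handlers a chance to deal
 * with ILLEGAL_MTHD errors, then acknowledges the interrupt and logs
 * whatever remains unhandled.
 */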
static void
nv40_graph_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nouveau_handle *handle = NULL;
	struct nv40_graph_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;
	int chid;

	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
			handle = nouveau_handle_get_class(engctx, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
			nouveau_handle_put(handle);
		}

		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nv_mask(priv, 0x402000, 0, 0);
		}
	}

	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nv_info(priv, "");
		nouveau_bitfield_print(nv10_graph_intr_name, show);
		printk(" nsource:");
		nouveau_bitfield_print(nv04_graph_nsource, nsource);
		printk(" nstatus:");
		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
		printk("\n");
		nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			 chid, inst << 4, subc, class, mthd, data);
	}

	nouveau_engctx_put(engctx);
}

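/*
 * Engine constructor.  nv44-family chipsets expose the 0x4497 curie class
 * instead of 0x4097, so the object class list is chosen based on
 * nv44_graph_class(); everything else is shared.
 */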
static int
nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv40_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv40_graph_intr;
	nv_engine(priv)->cclass = &nv40_graph_cclass;
	if (nv44_graph_class(priv))
		nv_engine(priv)->sclass = nv44_graph_sclass;
	else
		nv_engine(priv)->sclass = nv40_graph_sclass;
	nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
	return 0;
}

static int
nv40_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nouveau_fb *pfb = nouveau_fb(object);
	struct nv40_graph_priv *priv = (void *)engine;
	int ret, i, j;
	u32 vramsz;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* generate and upload context program */
	nv40_grctx_init(nv_device(priv), &priv->size);

	/* No context present currently */
	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);

	j = nv_rd32(priv, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(priv, 0x405000, i);
	}

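	/*
	 * Chipset-specific setup.  The register offsets and values below are
	 * not publicly documented and appear to have been carried over
	 * unchanged from the earlier nouveau PGRAPH code.
	 */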
	if (nv_device(priv)->chipset == 0x40) {
		nv_wr32(priv, 0x4009b0, 0x83280fff);
		nv_wr32(priv, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(priv, 0x400820, 0x83280eff);
		nv_wr32(priv, 0x400824, 0x000000a0);
	}

	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(priv, 0x4009b8, 0x0078e366);
		nv_wr32(priv, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(priv, 0x400828, 0x007596ff);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(priv, 0x400828, 0x0072cb77);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(priv, 0x400860, 0);
		nv_wr32(priv, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(priv, 0x400828, 0x07830610);
		nv_wr32(priv, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(priv, 0x400b38, 0x2ffff800);
	nv_wr32(priv, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (nv_device(priv)->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(priv, 0x400bc4, 0x0000e024);
		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* begin RAM config */
	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
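	/*
	 * Mirror the framebuffer configuration (PFB 0x100200/0x100204) and a
	 * limit derived from the BAR size into PGRAPH; the destination
	 * registers differ between NV40 and the later chipsets.
	 */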
	switch (nv_device(priv)->chipset) {
	case 0x40:
		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400820, 0);
		nv_wr32(priv, 0x400824, 0);
		nv_wr32(priv, 0x400864, vramsz);
		nv_wr32(priv, 0x400868, vramsz);
		break;
	default:
		switch (nv_device(priv)->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
			break;
		default:
			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
			break;
		}
		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400840, 0);
		nv_wr32(priv, 0x400844, 0);
		nv_wr32(priv, 0x4008A0, vramsz);
		nv_wr32(priv, 0x4008A4, vramsz);
		break;
	}

	return 0;
}

struct nouveau_oclass
nv40_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_ctor,
		.dtor = _nouveau_graph_dtor,
		.init = nv40_graph_init,
		.fini = _nouveau_graph_fini,
	},
};