]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
drm/nouveau/gr: remove dependence on namedb/engctx lookup
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / nouveau / nvkm / engine / gr / nv40.c
CommitLineData
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
e3c71eb2
BS
24#include "nv40.h"
25#include "regs.h"
6ee73861 26
93260d3c 27#include <core/client.h>
ebb945a9
BS
28#include <subdev/fb.h>
29#include <subdev/timer.h>
02a841d4 30#include <engine/fifo.h>
6ee73861 31
bfee3f3d 32struct nv40_gr {
e3c71eb2 33 struct nvkm_gr base;
ebb945a9 34 u32 size;
a65955e1 35 struct list_head chan;
39c8d368 36};
b8c157d3 37
b8bf04e1 38struct nv40_gr_chan {
e3c71eb2 39 struct nvkm_gr_chan base;
a65955e1
BS
40 struct nvkm_fifo_chan *fifo;
41 u32 inst;
42 struct list_head head;
ebb945a9
BS
43};
44
7e22e71e 45static u64
e3c71eb2 46nv40_gr_units(struct nvkm_gr *gr)
7e22e71e 47{
276836d4 48 return nvkm_rd32(gr->engine.subdev.device, 0x1540);
7e22e71e
CB
49}
50
ebb945a9
BS
51/*******************************************************************************
52 * Graphics object classes
53 ******************************************************************************/
54
39c8d368 55static int
e3c71eb2
BS
56nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
57 struct nvkm_oclass *oclass, void *data, u32 size,
58 struct nvkm_object **pobject)
39c8d368 59{
e3c71eb2 60 struct nvkm_gpuobj *obj;
39c8d368
BS
61 int ret;
62
e3c71eb2
BS
63 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
64 20, 16, 0, &obj);
ebb945a9 65 *pobject = nv_object(obj);
6ee73861
BS
66 if (ret)
67 return ret;
68
142ea05f
BS
69 nvkm_kmap(obj);
70 nvkm_wo32(obj, 0x00, nv_mclass(obj));
71 nvkm_wo32(obj, 0x04, 0x00000000);
72 nvkm_wo32(obj, 0x08, 0x00000000);
ebb945a9 73#ifdef __BIG_ENDIAN
142ea05f 74 nvkm_mo32(obj, 0x08, 0x01000000, 0x01000000);
ebb945a9 75#endif
142ea05f
BS
76 nvkm_wo32(obj, 0x0c, 0x00000000);
77 nvkm_wo32(obj, 0x10, 0x00000000);
78 nvkm_done(obj);
6ee73861
BS
79 return 0;
80}
81
e3c71eb2 82static struct nvkm_ofuncs
b8bf04e1
BS
83nv40_gr_ofuncs = {
84 .ctor = nv40_gr_object_ctor,
e3c71eb2
BS
85 .dtor = _nvkm_gpuobj_dtor,
86 .init = _nvkm_gpuobj_init,
87 .fini = _nvkm_gpuobj_fini,
88 .rd32 = _nvkm_gpuobj_rd32,
89 .wr32 = _nvkm_gpuobj_wr32,
ebb945a9 90};
6ee73861 91
e3c71eb2 92static struct nvkm_oclass
b8bf04e1
BS
93nv40_gr_sclass[] = {
94 { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
95 { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
96 { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
97 { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
98 { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
99 { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
100 { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
101 { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
102 { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
103 { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
104 { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
105 { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
106 { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
107 { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
108 { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
109 { 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
ebb945a9
BS
110 {},
111};
112
e3c71eb2 113static struct nvkm_oclass
b8bf04e1
BS
114nv44_gr_sclass[] = {
115 { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
116 { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
117 { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
118 { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
119 { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
120 { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
121 { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
122 { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
123 { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
124 { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
125 { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
126 { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
127 { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
128 { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
129 { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
130 { 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
ebb945a9
BS
131 {},
132};
133
134/*******************************************************************************
135 * PGRAPH context
136 ******************************************************************************/
6ee73861 137
a65955e1
BS
138static void
139nv40_gr_context_dtor(struct nvkm_object *object)
140{
141 struct nv40_gr_chan *chan = (void *)object;
142 unsigned long flags;
143 spin_lock_irqsave(&object->engine->lock, flags);
144 list_del(&chan->head);
145 spin_unlock_irqrestore(&object->engine->lock, flags);
146}
147
ebb945a9 148static int
e3c71eb2
BS
149nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
150 struct nvkm_oclass *oclass, void *data, u32 size,
151 struct nvkm_object **pobject)
4ea52f89 152{
bfee3f3d 153 struct nv40_gr *gr = (void *)engine;
b8bf04e1 154 struct nv40_gr_chan *chan;
a65955e1 155 unsigned long flags;
4ea52f89
BS
156 int ret;
157
bfee3f3d 158 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
e3c71eb2 159 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
ebb945a9 160 *pobject = nv_object(chan);
4ea52f89
BS
161 if (ret)
162 return ret;
4ea52f89 163
bfee3f3d 164 nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
142ea05f 165 nvkm_wo32(&chan->base.base.gpuobj, 0x00000, nv_gpuobj(chan)->addr >> 4);
a65955e1
BS
166
167 spin_lock_irqsave(&gr->base.engine.lock, flags);
168 chan->fifo = (void *)parent;
169 chan->inst = chan->base.base.gpuobj.addr;
170 list_add(&chan->head, &gr->chan);
171 spin_unlock_irqrestore(&gr->base.engine.lock, flags);
ebb945a9
BS
172 return 0;
173}
174
175static int
e3c71eb2 176nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
ebb945a9 177{
bfee3f3d 178 struct nv40_gr *gr = (void *)object->engine;
b8bf04e1 179 struct nv40_gr_chan *chan = (void *)object;
109c2f2f
BS
180 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
181 struct nvkm_device *device = subdev->device;
ebb945a9
BS
182 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
183 int ret = 0;
184
276836d4 185 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
ebb945a9 186
276836d4 187 if (nvkm_rd32(device, 0x40032c) == inst) {
ebb945a9 188 if (suspend) {
276836d4
BS
189 nvkm_wr32(device, 0x400720, 0x00000000);
190 nvkm_wr32(device, 0x400784, inst);
191 nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
192 nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
c4584adc
BS
193 if (nvkm_msec(device, 2000,
194 if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
195 break;
196 ) < 0) {
276836d4 197 u32 insn = nvkm_rd32(device, 0x400308);
109c2f2f 198 nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
ebb945a9
BS
199 ret = -EBUSY;
200 }
201 }
4ea52f89 202
276836d4 203 nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
ebb945a9
BS
204 }
205
276836d4
BS
206 if (nvkm_rd32(device, 0x400330) == inst)
207 nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
ebb945a9 208
276836d4 209 nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
4ea52f89
BS
210 return ret;
211}
212
e3c71eb2 213static struct nvkm_oclass
b8bf04e1 214nv40_gr_cclass = {
ebb945a9 215 .handle = NV_ENGCTX(GR, 0x40),
e3c71eb2 216 .ofuncs = &(struct nvkm_ofuncs) {
b8bf04e1 217 .ctor = nv40_gr_context_ctor,
a65955e1 218 .dtor = nv40_gr_context_dtor,
e3c71eb2 219 .init = _nvkm_gr_context_init,
b8bf04e1 220 .fini = nv40_gr_context_fini,
e3c71eb2
BS
221 .rd32 = _nvkm_gr_context_rd32,
222 .wr32 = _nvkm_gr_context_wr32,
ebb945a9
BS
223 },
224};
225
226/*******************************************************************************
227 * PGRAPH engine/subdev functions
228 ******************************************************************************/
229
96c50082 230static void
e3c71eb2 231nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
0d87c100 232{
bfee3f3d 233 struct nv40_gr *gr = (void *)engine;
276836d4
BS
234 struct nvkm_device *device = gr->base.engine.subdev.device;
235 struct nvkm_fifo *fifo = device->fifo;
236 struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
ebb945a9
BS
237 unsigned long flags;
238
6189f1b0 239 fifo->pause(fifo, &flags);
bfee3f3d 240 nv04_gr_idle(gr);
0d87c100 241
bfee3f3d 242 switch (nv_device(gr)->chipset) {
1dc32671 243 case 0x40:
fafa0cb3 244 case 0x41:
1dc32671
BS
245 case 0x42:
246 case 0x43:
fafa0cb3 247 case 0x45:
1dc32671 248 case 0x4e:
276836d4
BS
249 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
250 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
251 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
252 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
253 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
254 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
bfee3f3d 255 switch (nv_device(gr)->chipset) {
fafa0cb3
BS
256 case 0x40:
257 case 0x45:
276836d4
BS
258 nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
259 nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
fafa0cb3
BS
260 break;
261 case 0x41:
262 case 0x42:
263 case 0x43:
276836d4
BS
264 nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
265 nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
fafa0cb3
BS
266 break;
267 default:
268 break;
269 }
1dc32671 270 break;
0d87c100
FJ
271 case 0x44:
272 case 0x4a:
276836d4
BS
273 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
274 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
275 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
0d87c100 276 break;
0d87c100 277 case 0x46:
fafa0cb3 278 case 0x4c:
0d87c100
FJ
279 case 0x47:
280 case 0x49:
281 case 0x4b:
fafa0cb3 282 case 0x63:
1dc32671 283 case 0x67:
fafa0cb3 284 case 0x68:
276836d4
BS
285 nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
286 nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
287 nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
288 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
289 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
290 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
bfee3f3d 291 switch (nv_device(gr)->chipset) {
fafa0cb3
BS
292 case 0x47:
293 case 0x49:
294 case 0x4b:
276836d4
BS
295 nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
296 nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
fafa0cb3
BS
297 break;
298 default:
299 break;
300 }
301 break;
302 default:
0d87c100 303 break;
0d87c100 304 }
ebb945a9 305
6189f1b0 306 fifo->start(fifo, &flags);
0d87c100
FJ
307}
308
ebb945a9 309static void
e3c71eb2 310nv40_gr_intr(struct nvkm_subdev *subdev)
ebb945a9 311{
bfee3f3d 312 struct nv40_gr *gr = (void *)subdev;
a65955e1 313 struct nv40_gr_chan *temp, *chan = NULL;
276836d4
BS
314 struct nvkm_device *device = gr->base.engine.subdev.device;
315 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
316 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
317 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
318 u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
319 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
ebb945a9
BS
320 u32 subc = (addr & 0x00070000) >> 16;
321 u32 mthd = (addr & 0x00001ffc);
276836d4
BS
322 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
323 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
ebb945a9 324 u32 show = stat;
109c2f2f 325 char msg[128], src[128], sta[128];
a65955e1 326 unsigned long flags;
ebb945a9 327
a65955e1
BS
328 spin_lock_irqsave(&gr->base.engine.lock, flags);
329 list_for_each_entry(temp, &gr->chan, head) {
330 if (temp->inst >> 4 == inst) {
331 chan = temp;
332 list_del(&chan->head);
333 list_add(&chan->head, &gr->chan);
334 break;
ebb945a9 335 }
a65955e1 336 }
ebb945a9 337
a65955e1 338 if (stat & NV_PGRAPH_INTR_ERROR) {
ebb945a9 339 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
276836d4 340 nvkm_mask(device, 0x402000, 0, 0);
ebb945a9
BS
341 }
342 }
343
276836d4
BS
344 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
345 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
ebb945a9
BS
346
347 if (show) {
109c2f2f
BS
348 nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
349 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
350 nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
351 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
352 "nstatus %08x [%s] ch %d [%08x %s] subc %d "
353 "class %04x mthd %04x data %08x\n",
a65955e1
BS
354 show, msg, nsource, src, nstatus, sta,
355 chan ? chan->fifo->chid : -1, inst << 4,
356 nvkm_client_name(chan), subc, class, mthd, data);
ebb945a9 357 }
72a14827 358
a65955e1 359 spin_unlock_irqrestore(&gr->base.engine.lock, flags);
ebb945a9
BS
360}
361
362static int
e3c71eb2
BS
363nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
364 struct nvkm_oclass *oclass, void *data, u32 size,
365 struct nvkm_object **pobject)
6ee73861 366{
bfee3f3d 367 struct nv40_gr *gr;
ebb945a9
BS
368 int ret;
369
bfee3f3d
BS
370 ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
371 *pobject = nv_object(gr);
ebb945a9
BS
372 if (ret)
373 return ret;
6ee73861 374
a65955e1
BS
375 INIT_LIST_HEAD(&gr->chan);
376
bfee3f3d
BS
377 nv_subdev(gr)->unit = 0x00001000;
378 nv_subdev(gr)->intr = nv40_gr_intr;
379 nv_engine(gr)->cclass = &nv40_gr_cclass;
380 if (nv44_gr_class(gr))
381 nv_engine(gr)->sclass = nv44_gr_sclass;
ebb945a9 382 else
bfee3f3d
BS
383 nv_engine(gr)->sclass = nv40_gr_sclass;
384 nv_engine(gr)->tile_prog = nv40_gr_tile_prog;
7e22e71e 385
bfee3f3d 386 gr->base.units = nv40_gr_units;
ebb945a9
BS
387 return 0;
388}
389
390static int
e3c71eb2 391nv40_gr_init(struct nvkm_object *object)
ebb945a9 392{
e3c71eb2 393 struct nvkm_engine *engine = nv_engine(object);
bfee3f3d 394 struct nv40_gr *gr = (void *)engine;
276836d4
BS
395 struct nvkm_device *device = gr->base.engine.subdev.device;
396 struct nvkm_fb *fb = device->fb;
ebb945a9
BS
397 int ret, i, j;
398 u32 vramsz;
399
bfee3f3d 400 ret = nvkm_gr_init(&gr->base);
ebb945a9
BS
401 if (ret)
402 return ret;
6ee73861 403
d58086de 404 /* generate and upload context program */
bfee3f3d 405 ret = nv40_grctx_init(nv_device(gr), &gr->size);
1f150b3e
MS
406 if (ret)
407 return ret;
6ee73861
BS
408
409 /* No context present currently */
276836d4 410 nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
6ee73861 411
276836d4
BS
412 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
413 nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
6ee73861 414
276836d4
BS
415 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
416 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
417 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
418 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
419 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
420 nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
6ee73861 421
276836d4
BS
422 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
423 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
6ee73861 424
276836d4 425 j = nvkm_rd32(device, 0x1540) & 0xff;
6ee73861
BS
426 if (j) {
427 for (i = 0; !(j & 1); j >>= 1, i++)
428 ;
276836d4 429 nvkm_wr32(device, 0x405000, i);
6ee73861
BS
430 }
431
bfee3f3d 432 if (nv_device(gr)->chipset == 0x40) {
276836d4
BS
433 nvkm_wr32(device, 0x4009b0, 0x83280fff);
434 nvkm_wr32(device, 0x4009b4, 0x000000a0);
6ee73861 435 } else {
276836d4
BS
436 nvkm_wr32(device, 0x400820, 0x83280eff);
437 nvkm_wr32(device, 0x400824, 0x000000a0);
6ee73861
BS
438 }
439
bfee3f3d 440 switch (nv_device(gr)->chipset) {
6ee73861
BS
441 case 0x40:
442 case 0x45:
276836d4
BS
443 nvkm_wr32(device, 0x4009b8, 0x0078e366);
444 nvkm_wr32(device, 0x4009bc, 0x0000014c);
6ee73861
BS
445 break;
446 case 0x41:
447 case 0x42: /* pciid also 0x00Cx */
448 /* case 0x0120: XXX (pciid) */
276836d4
BS
449 nvkm_wr32(device, 0x400828, 0x007596ff);
450 nvkm_wr32(device, 0x40082c, 0x00000108);
6ee73861
BS
451 break;
452 case 0x43:
276836d4
BS
453 nvkm_wr32(device, 0x400828, 0x0072cb77);
454 nvkm_wr32(device, 0x40082c, 0x00000108);
6ee73861
BS
455 break;
456 case 0x44:
457 case 0x46: /* G72 */
458 case 0x4a:
459 case 0x4c: /* G7x-based C51 */
460 case 0x4e:
276836d4
BS
461 nvkm_wr32(device, 0x400860, 0);
462 nvkm_wr32(device, 0x400864, 0);
6ee73861
BS
463 break;
464 case 0x47: /* G70 */
465 case 0x49: /* G71 */
466 case 0x4b: /* G73 */
276836d4
BS
467 nvkm_wr32(device, 0x400828, 0x07830610);
468 nvkm_wr32(device, 0x40082c, 0x0000016A);
6ee73861
BS
469 break;
470 default:
471 break;
472 }
473
276836d4
BS
474 nvkm_wr32(device, 0x400b38, 0x2ffff800);
475 nvkm_wr32(device, 0x400b3c, 0x00006000);
6ee73861 476
2295e17a 477 /* Tiling related stuff. */
bfee3f3d 478 switch (nv_device(gr)->chipset) {
2295e17a
FJ
479 case 0x44:
480 case 0x4a:
276836d4
BS
481 nvkm_wr32(device, 0x400bc4, 0x1003d888);
482 nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
2295e17a
FJ
483 break;
484 case 0x46:
276836d4
BS
485 nvkm_wr32(device, 0x400bc4, 0x0000e024);
486 nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
2295e17a
FJ
487 break;
488 case 0x4c:
489 case 0x4e:
490 case 0x67:
276836d4
BS
491 nvkm_wr32(device, 0x400bc4, 0x1003d888);
492 nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
2295e17a
FJ
493 break;
494 default:
495 break;
496 }
497
0d87c100 498 /* Turn all the tiling regions off. */
b1e4553c 499 for (i = 0; i < fb->tile.regions; i++)
ebb945a9 500 engine->tile_prog(engine, i);
6ee73861
BS
501
502 /* begin RAM config */
bfee3f3d
BS
503 vramsz = nv_device_resource_len(nv_device(gr), 1) - 1;
504 switch (nv_device(gr)->chipset) {
6ee73861 505 case 0x40:
276836d4
BS
506 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
507 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
508 nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
509 nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
510 nvkm_wr32(device, 0x400820, 0);
511 nvkm_wr32(device, 0x400824, 0);
512 nvkm_wr32(device, 0x400864, vramsz);
513 nvkm_wr32(device, 0x400868, vramsz);
6ee73861
BS
514 break;
515 default:
bfee3f3d 516 switch (nv_device(gr)->chipset) {
1dc32671
BS
517 case 0x41:
518 case 0x42:
519 case 0x43:
520 case 0x45:
521 case 0x4e:
522 case 0x44:
523 case 0x4a:
276836d4
BS
524 nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
525 nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
6ee73861 526 break;
1dc32671 527 default:
276836d4
BS
528 nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
529 nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
1dc32671 530 break;
6ee73861 531 }
276836d4
BS
532 nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
533 nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
534 nvkm_wr32(device, 0x400840, 0);
535 nvkm_wr32(device, 0x400844, 0);
536 nvkm_wr32(device, 0x4008A0, vramsz);
537 nvkm_wr32(device, 0x4008A4, vramsz);
6ee73861
BS
538 break;
539 }
540
541 return 0;
542}
543
e3c71eb2 544struct nvkm_oclass
b8bf04e1 545nv40_gr_oclass = {
ebb945a9 546 .handle = NV_ENGINE(GR, 0x40),
e3c71eb2 547 .ofuncs = &(struct nvkm_ofuncs) {
b8bf04e1 548 .ctor = nv40_gr_ctor,
e3c71eb2 549 .dtor = _nvkm_gr_dtor,
b8bf04e1 550 .init = nv40_gr_init,
e3c71eb2 551 .fini = _nvkm_gr_fini,
ebb945a9
BS
552 },
553};