2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
28 #include <core/client.h>
29 #include <core/gpuobj.h>
30 #include <core/ramht.h>
31 #include <subdev/bios.h>
32 #include <subdev/bios/dcb.h>
33 #include <subdev/bios/disp.h>
34 #include <subdev/bios/init.h>
35 #include <subdev/bios/pll.h>
36 #include <subdev/devinit.h>
37 #include <subdev/timer.h>
39 #include <nvif/class.h>
40 #include <nvif/unpack.h>
42 /*******************************************************************************
43 * EVO channel base class
44 ******************************************************************************/
47 gf110_disp_chan_uevent_fini(struct nvkm_event
*event
, int type
, int index
)
49 struct nv50_disp_priv
*priv
= container_of(event
, typeof(*priv
), uevent
);
50 nv_mask(priv
, 0x610090, 0x00000001 << index
, 0x00000000 << index
);
51 nv_wr32(priv
, 0x61008c, 0x00000001 << index
);
55 gf110_disp_chan_uevent_init(struct nvkm_event
*event
, int types
, int index
)
57 struct nv50_disp_priv
*priv
= container_of(event
, typeof(*priv
), uevent
);
58 nv_wr32(priv
, 0x61008c, 0x00000001 << index
);
59 nv_mask(priv
, 0x610090, 0x00000001 << index
, 0x00000001 << index
);
62 const struct nvkm_event_func
63 gf110_disp_chan_uevent
= {
64 .ctor
= nv50_disp_chan_uevent_ctor
,
65 .init
= gf110_disp_chan_uevent_init
,
66 .fini
= gf110_disp_chan_uevent_fini
,
69 /*******************************************************************************
70 * EVO DMA channel base class
71 ******************************************************************************/
74 gf110_disp_dmac_object_attach(struct nvkm_object
*parent
,
75 struct nvkm_object
*object
, u32 name
)
77 struct nv50_disp_base
*base
= (void *)parent
->parent
;
78 struct nv50_disp_chan
*chan
= (void *)parent
;
79 u32 addr
= nv_gpuobj(object
)->node
->offset
;
80 u32 data
= (chan
->chid
<< 27) | (addr
<< 9) | 0x00000001;
81 return nvkm_ramht_insert(base
->ramht
, chan
->chid
, name
, data
);
85 gf110_disp_dmac_object_detach(struct nvkm_object
*parent
, int cookie
)
87 struct nv50_disp_base
*base
= (void *)parent
->parent
;
88 nvkm_ramht_remove(base
->ramht
, cookie
);
92 gf110_disp_dmac_init(struct nvkm_object
*object
)
94 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
95 struct nv50_disp_dmac
*dmac
= (void *)object
;
96 int chid
= dmac
->base
.chid
;
99 ret
= nv50_disp_chan_init(&dmac
->base
);
103 /* enable error reporting */
104 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
106 /* initialise channel for dma command submission */
107 nv_wr32(priv
, 0x610494 + (chid
* 0x0010), dmac
->push
);
108 nv_wr32(priv
, 0x610498 + (chid
* 0x0010), 0x00010000);
109 nv_wr32(priv
, 0x61049c + (chid
* 0x0010), 0x00000001);
110 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000010, 0x00000010);
111 nv_wr32(priv
, 0x640000 + (chid
* 0x1000), 0x00000000);
112 nv_wr32(priv
, 0x610490 + (chid
* 0x0010), 0x00000013);
114 /* wait for it to go inactive */
115 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x80000000, 0x00000000)) {
116 nv_error(dmac
, "init: 0x%08x\n",
117 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
125 gf110_disp_dmac_fini(struct nvkm_object
*object
, bool suspend
)
127 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
128 struct nv50_disp_dmac
*dmac
= (void *)object
;
129 int chid
= dmac
->base
.chid
;
131 /* deactivate channel */
132 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00001010, 0x00001000);
133 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000003, 0x00000000);
134 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x001e0000, 0x00000000)) {
135 nv_error(dmac
, "fini: 0x%08x\n",
136 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
141 /* disable error reporting and completion notification */
142 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
143 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
145 return nv50_disp_chan_fini(&dmac
->base
, suspend
);
148 /*******************************************************************************
149 * EVO master channel object
150 ******************************************************************************/
152 const struct nv50_disp_mthd_list
153 gf110_disp_core_mthd_base
= {
157 { 0x0080, 0x660080 },
158 { 0x0084, 0x660084 },
159 { 0x0088, 0x660088 },
160 { 0x008c, 0x000000 },
165 const struct nv50_disp_mthd_list
166 gf110_disp_core_mthd_dac
= {
170 { 0x0180, 0x660180 },
171 { 0x0184, 0x660184 },
172 { 0x0188, 0x660188 },
173 { 0x0190, 0x660190 },
178 const struct nv50_disp_mthd_list
179 gf110_disp_core_mthd_sor
= {
183 { 0x0200, 0x660200 },
184 { 0x0204, 0x660204 },
185 { 0x0208, 0x660208 },
186 { 0x0210, 0x660210 },
191 const struct nv50_disp_mthd_list
192 gf110_disp_core_mthd_pior
= {
196 { 0x0300, 0x660300 },
197 { 0x0304, 0x660304 },
198 { 0x0308, 0x660308 },
199 { 0x0310, 0x660310 },
204 static const struct nv50_disp_mthd_list
205 gf110_disp_core_mthd_head
= {
209 { 0x0400, 0x660400 },
210 { 0x0404, 0x660404 },
211 { 0x0408, 0x660408 },
212 { 0x040c, 0x66040c },
213 { 0x0410, 0x660410 },
214 { 0x0414, 0x660414 },
215 { 0x0418, 0x660418 },
216 { 0x041c, 0x66041c },
217 { 0x0420, 0x660420 },
218 { 0x0424, 0x660424 },
219 { 0x0428, 0x660428 },
220 { 0x042c, 0x66042c },
221 { 0x0430, 0x660430 },
222 { 0x0434, 0x660434 },
223 { 0x0438, 0x660438 },
224 { 0x0440, 0x660440 },
225 { 0x0444, 0x660444 },
226 { 0x0448, 0x660448 },
227 { 0x044c, 0x66044c },
228 { 0x0450, 0x660450 },
229 { 0x0454, 0x660454 },
230 { 0x0458, 0x660458 },
231 { 0x045c, 0x66045c },
232 { 0x0460, 0x660460 },
233 { 0x0468, 0x660468 },
234 { 0x046c, 0x66046c },
235 { 0x0470, 0x660470 },
236 { 0x0474, 0x660474 },
237 { 0x0480, 0x660480 },
238 { 0x0484, 0x660484 },
239 { 0x048c, 0x66048c },
240 { 0x0490, 0x660490 },
241 { 0x0494, 0x660494 },
242 { 0x0498, 0x660498 },
243 { 0x04b0, 0x6604b0 },
244 { 0x04b8, 0x6604b8 },
245 { 0x04bc, 0x6604bc },
246 { 0x04c0, 0x6604c0 },
247 { 0x04c4, 0x6604c4 },
248 { 0x04c8, 0x6604c8 },
249 { 0x04d0, 0x6604d0 },
250 { 0x04d4, 0x6604d4 },
251 { 0x04e0, 0x6604e0 },
252 { 0x04e4, 0x6604e4 },
253 { 0x04e8, 0x6604e8 },
254 { 0x04ec, 0x6604ec },
255 { 0x04f0, 0x6604f0 },
256 { 0x04f4, 0x6604f4 },
257 { 0x04f8, 0x6604f8 },
258 { 0x04fc, 0x6604fc },
259 { 0x0500, 0x660500 },
260 { 0x0504, 0x660504 },
261 { 0x0508, 0x660508 },
262 { 0x050c, 0x66050c },
263 { 0x0510, 0x660510 },
264 { 0x0514, 0x660514 },
265 { 0x0518, 0x660518 },
266 { 0x051c, 0x66051c },
267 { 0x052c, 0x66052c },
268 { 0x0530, 0x660530 },
269 { 0x054c, 0x66054c },
270 { 0x0550, 0x660550 },
271 { 0x0554, 0x660554 },
272 { 0x0558, 0x660558 },
273 { 0x055c, 0x66055c },
278 static const struct nv50_disp_mthd_chan
279 gf110_disp_core_mthd_chan
= {
283 { "Global", 1, &gf110_disp_core_mthd_base
},
284 { "DAC", 3, &gf110_disp_core_mthd_dac
},
285 { "SOR", 8, &gf110_disp_core_mthd_sor
},
286 { "PIOR", 4, &gf110_disp_core_mthd_pior
},
287 { "HEAD", 4, &gf110_disp_core_mthd_head
},
293 gf110_disp_core_init(struct nvkm_object
*object
)
295 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
296 struct nv50_disp_dmac
*mast
= (void *)object
;
299 ret
= nv50_disp_chan_init(&mast
->base
);
303 /* enable error reporting */
304 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000001);
306 /* initialise channel for dma command submission */
307 nv_wr32(priv
, 0x610494, mast
->push
);
308 nv_wr32(priv
, 0x610498, 0x00010000);
309 nv_wr32(priv
, 0x61049c, 0x00000001);
310 nv_mask(priv
, 0x610490, 0x00000010, 0x00000010);
311 nv_wr32(priv
, 0x640000, 0x00000000);
312 nv_wr32(priv
, 0x610490, 0x01000013);
314 /* wait for it to go inactive */
315 if (!nv_wait(priv
, 0x610490, 0x80000000, 0x00000000)) {
316 nv_error(mast
, "init: 0x%08x\n", nv_rd32(priv
, 0x610490));
324 gf110_disp_core_fini(struct nvkm_object
*object
, bool suspend
)
326 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
327 struct nv50_disp_dmac
*mast
= (void *)object
;
329 /* deactivate channel */
330 nv_mask(priv
, 0x610490, 0x00000010, 0x00000000);
331 nv_mask(priv
, 0x610490, 0x00000003, 0x00000000);
332 if (!nv_wait(priv
, 0x610490, 0x001e0000, 0x00000000)) {
333 nv_error(mast
, "fini: 0x%08x\n", nv_rd32(priv
, 0x610490));
338 /* disable error reporting and completion notification */
339 nv_mask(priv
, 0x610090, 0x00000001, 0x00000000);
340 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000000);
342 return nv50_disp_chan_fini(&mast
->base
, suspend
);
345 struct nv50_disp_chan_impl
346 gf110_disp_core_ofuncs
= {
347 .base
.ctor
= nv50_disp_core_ctor
,
348 .base
.dtor
= nv50_disp_dmac_dtor
,
349 .base
.init
= gf110_disp_core_init
,
350 .base
.fini
= gf110_disp_core_fini
,
351 .base
.ntfy
= nv50_disp_chan_ntfy
,
352 .base
.map
= nv50_disp_chan_map
,
353 .base
.rd32
= nv50_disp_chan_rd32
,
354 .base
.wr32
= nv50_disp_chan_wr32
,
356 .attach
= gf110_disp_dmac_object_attach
,
357 .detach
= gf110_disp_dmac_object_detach
,
360 /*******************************************************************************
361 * EVO sync channel objects
362 ******************************************************************************/
364 static const struct nv50_disp_mthd_list
365 gf110_disp_base_mthd_base
= {
369 { 0x0080, 0x661080 },
370 { 0x0084, 0x661084 },
371 { 0x0088, 0x661088 },
372 { 0x008c, 0x66108c },
373 { 0x0090, 0x661090 },
374 { 0x0094, 0x661094 },
375 { 0x00a0, 0x6610a0 },
376 { 0x00a4, 0x6610a4 },
377 { 0x00c0, 0x6610c0 },
378 { 0x00c4, 0x6610c4 },
379 { 0x00c8, 0x6610c8 },
380 { 0x00cc, 0x6610cc },
381 { 0x00e0, 0x6610e0 },
382 { 0x00e4, 0x6610e4 },
383 { 0x00e8, 0x6610e8 },
384 { 0x00ec, 0x6610ec },
385 { 0x00fc, 0x6610fc },
386 { 0x0100, 0x661100 },
387 { 0x0104, 0x661104 },
388 { 0x0108, 0x661108 },
389 { 0x010c, 0x66110c },
390 { 0x0110, 0x661110 },
391 { 0x0114, 0x661114 },
392 { 0x0118, 0x661118 },
393 { 0x011c, 0x66111c },
394 { 0x0130, 0x661130 },
395 { 0x0134, 0x661134 },
396 { 0x0138, 0x661138 },
397 { 0x013c, 0x66113c },
398 { 0x0140, 0x661140 },
399 { 0x0144, 0x661144 },
400 { 0x0148, 0x661148 },
401 { 0x014c, 0x66114c },
402 { 0x0150, 0x661150 },
403 { 0x0154, 0x661154 },
404 { 0x0158, 0x661158 },
405 { 0x015c, 0x66115c },
406 { 0x0160, 0x661160 },
407 { 0x0164, 0x661164 },
408 { 0x0168, 0x661168 },
409 { 0x016c, 0x66116c },
414 static const struct nv50_disp_mthd_list
415 gf110_disp_base_mthd_image
= {
419 { 0x0400, 0x661400 },
420 { 0x0404, 0x661404 },
421 { 0x0408, 0x661408 },
422 { 0x040c, 0x66140c },
423 { 0x0410, 0x661410 },
428 const struct nv50_disp_mthd_chan
429 gf110_disp_base_mthd_chan
= {
433 { "Global", 1, &gf110_disp_base_mthd_base
},
434 { "Image", 2, &gf110_disp_base_mthd_image
},
439 struct nv50_disp_chan_impl
440 gf110_disp_base_ofuncs
= {
441 .base
.ctor
= nv50_disp_base_ctor
,
442 .base
.dtor
= nv50_disp_dmac_dtor
,
443 .base
.init
= gf110_disp_dmac_init
,
444 .base
.fini
= gf110_disp_dmac_fini
,
445 .base
.ntfy
= nv50_disp_chan_ntfy
,
446 .base
.map
= nv50_disp_chan_map
,
447 .base
.rd32
= nv50_disp_chan_rd32
,
448 .base
.wr32
= nv50_disp_chan_wr32
,
450 .attach
= gf110_disp_dmac_object_attach
,
451 .detach
= gf110_disp_dmac_object_detach
,
454 /*******************************************************************************
455 * EVO overlay channel objects
456 ******************************************************************************/
458 static const struct nv50_disp_mthd_list
459 gf110_disp_ovly_mthd_base
= {
462 { 0x0080, 0x665080 },
463 { 0x0084, 0x665084 },
464 { 0x0088, 0x665088 },
465 { 0x008c, 0x66508c },
466 { 0x0090, 0x665090 },
467 { 0x0094, 0x665094 },
468 { 0x00a0, 0x6650a0 },
469 { 0x00a4, 0x6650a4 },
470 { 0x00b0, 0x6650b0 },
471 { 0x00b4, 0x6650b4 },
472 { 0x00b8, 0x6650b8 },
473 { 0x00c0, 0x6650c0 },
474 { 0x00e0, 0x6650e0 },
475 { 0x00e4, 0x6650e4 },
476 { 0x00e8, 0x6650e8 },
477 { 0x0100, 0x665100 },
478 { 0x0104, 0x665104 },
479 { 0x0108, 0x665108 },
480 { 0x010c, 0x66510c },
481 { 0x0110, 0x665110 },
482 { 0x0118, 0x665118 },
483 { 0x011c, 0x66511c },
484 { 0x0120, 0x665120 },
485 { 0x0124, 0x665124 },
486 { 0x0130, 0x665130 },
487 { 0x0134, 0x665134 },
488 { 0x0138, 0x665138 },
489 { 0x013c, 0x66513c },
490 { 0x0140, 0x665140 },
491 { 0x0144, 0x665144 },
492 { 0x0148, 0x665148 },
493 { 0x014c, 0x66514c },
494 { 0x0150, 0x665150 },
495 { 0x0154, 0x665154 },
496 { 0x0158, 0x665158 },
497 { 0x015c, 0x66515c },
498 { 0x0160, 0x665160 },
499 { 0x0164, 0x665164 },
500 { 0x0168, 0x665168 },
501 { 0x016c, 0x66516c },
502 { 0x0400, 0x665400 },
503 { 0x0408, 0x665408 },
504 { 0x040c, 0x66540c },
505 { 0x0410, 0x665410 },
510 static const struct nv50_disp_mthd_chan
511 gf110_disp_ovly_mthd_chan
= {
515 { "Global", 1, &gf110_disp_ovly_mthd_base
},
520 struct nv50_disp_chan_impl
521 gf110_disp_ovly_ofuncs
= {
522 .base
.ctor
= nv50_disp_ovly_ctor
,
523 .base
.dtor
= nv50_disp_dmac_dtor
,
524 .base
.init
= gf110_disp_dmac_init
,
525 .base
.fini
= gf110_disp_dmac_fini
,
526 .base
.ntfy
= nv50_disp_chan_ntfy
,
527 .base
.map
= nv50_disp_chan_map
,
528 .base
.rd32
= nv50_disp_chan_rd32
,
529 .base
.wr32
= nv50_disp_chan_wr32
,
531 .attach
= gf110_disp_dmac_object_attach
,
532 .detach
= gf110_disp_dmac_object_detach
,
535 /*******************************************************************************
536 * EVO PIO channel base class
537 ******************************************************************************/
540 gf110_disp_pioc_init(struct nvkm_object
*object
)
542 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
543 struct nv50_disp_pioc
*pioc
= (void *)object
;
544 int chid
= pioc
->base
.chid
;
547 ret
= nv50_disp_chan_init(&pioc
->base
);
551 /* enable error reporting */
552 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
554 /* activate channel */
555 nv_wr32(priv
, 0x610490 + (chid
* 0x10), 0x00000001);
556 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00010000)) {
557 nv_error(pioc
, "init: 0x%08x\n",
558 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
566 gf110_disp_pioc_fini(struct nvkm_object
*object
, bool suspend
)
568 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
569 struct nv50_disp_pioc
*pioc
= (void *)object
;
570 int chid
= pioc
->base
.chid
;
572 nv_mask(priv
, 0x610490 + (chid
* 0x10), 0x00000001, 0x00000000);
573 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00000000)) {
574 nv_error(pioc
, "timeout: 0x%08x\n",
575 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
580 /* disable error reporting and completion notification */
581 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
582 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
584 return nv50_disp_chan_fini(&pioc
->base
, suspend
);
587 /*******************************************************************************
588 * EVO immediate overlay channel objects
589 ******************************************************************************/
591 struct nv50_disp_chan_impl
592 gf110_disp_oimm_ofuncs
= {
593 .base
.ctor
= nv50_disp_oimm_ctor
,
594 .base
.dtor
= nv50_disp_pioc_dtor
,
595 .base
.init
= gf110_disp_pioc_init
,
596 .base
.fini
= gf110_disp_pioc_fini
,
597 .base
.ntfy
= nv50_disp_chan_ntfy
,
598 .base
.map
= nv50_disp_chan_map
,
599 .base
.rd32
= nv50_disp_chan_rd32
,
600 .base
.wr32
= nv50_disp_chan_wr32
,
604 /*******************************************************************************
605 * EVO cursor channel objects
606 ******************************************************************************/
608 struct nv50_disp_chan_impl
609 gf110_disp_curs_ofuncs
= {
610 .base
.ctor
= nv50_disp_curs_ctor
,
611 .base
.dtor
= nv50_disp_pioc_dtor
,
612 .base
.init
= gf110_disp_pioc_init
,
613 .base
.fini
= gf110_disp_pioc_fini
,
614 .base
.ntfy
= nv50_disp_chan_ntfy
,
615 .base
.map
= nv50_disp_chan_map
,
616 .base
.rd32
= nv50_disp_chan_rd32
,
617 .base
.wr32
= nv50_disp_chan_wr32
,
621 /*******************************************************************************
622 * Base display object
623 ******************************************************************************/
626 gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0
)
628 const u32 total
= nv_rd32(priv
, 0x640414 + (head
* 0x300));
629 const u32 blanke
= nv_rd32(priv
, 0x64041c + (head
* 0x300));
630 const u32 blanks
= nv_rd32(priv
, 0x640420 + (head
* 0x300));
632 struct nv04_disp_scanoutpos_v0 v0
;
636 nv_ioctl(object
, "disp scanoutpos size %d\n", size
);
637 if (nvif_unpack(args
->v0
, 0, 0, false)) {
638 nv_ioctl(object
, "disp scanoutpos vers %d\n", args
->v0
.version
);
639 args
->v0
.vblanke
= (blanke
& 0xffff0000) >> 16;
640 args
->v0
.hblanke
= (blanke
& 0x0000ffff);
641 args
->v0
.vblanks
= (blanks
& 0xffff0000) >> 16;
642 args
->v0
.hblanks
= (blanks
& 0x0000ffff);
643 args
->v0
.vtotal
= ( total
& 0xffff0000) >> 16;
644 args
->v0
.htotal
= ( total
& 0x0000ffff);
645 args
->v0
.time
[0] = ktime_to_ns(ktime_get());
646 args
->v0
.vline
= /* vline read locks hline */
647 nv_rd32(priv
, 0x616340 + (head
* 0x800)) & 0xffff;
648 args
->v0
.time
[1] = ktime_to_ns(ktime_get());
650 nv_rd32(priv
, 0x616344 + (head
* 0x800)) & 0xffff;
658 gf110_disp_main_init(struct nvkm_object
*object
)
660 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
661 struct nv50_disp_base
*base
= (void *)object
;
665 ret
= nvkm_parent_init(&base
->base
);
669 /* The below segments of code copying values from one register to
670 * another appear to inform EVO of the display capabilities or
675 for (i
= 0; i
< priv
->head
.nr
; i
++) {
676 tmp
= nv_rd32(priv
, 0x616104 + (i
* 0x800));
677 nv_wr32(priv
, 0x6101b4 + (i
* 0x800), tmp
);
678 tmp
= nv_rd32(priv
, 0x616108 + (i
* 0x800));
679 nv_wr32(priv
, 0x6101b8 + (i
* 0x800), tmp
);
680 tmp
= nv_rd32(priv
, 0x61610c + (i
* 0x800));
681 nv_wr32(priv
, 0x6101bc + (i
* 0x800), tmp
);
685 for (i
= 0; i
< priv
->dac
.nr
; i
++) {
686 tmp
= nv_rd32(priv
, 0x61a000 + (i
* 0x800));
687 nv_wr32(priv
, 0x6101c0 + (i
* 0x800), tmp
);
691 for (i
= 0; i
< priv
->sor
.nr
; i
++) {
692 tmp
= nv_rd32(priv
, 0x61c000 + (i
* 0x800));
693 nv_wr32(priv
, 0x6301c4 + (i
* 0x800), tmp
);
696 /* steal display away from vbios, or something like that */
697 if (nv_rd32(priv
, 0x6100ac) & 0x00000100) {
698 nv_wr32(priv
, 0x6100ac, 0x00000100);
699 nv_mask(priv
, 0x6194e8, 0x00000001, 0x00000000);
700 if (!nv_wait(priv
, 0x6194e8, 0x00000002, 0x00000000)) {
701 nv_error(priv
, "timeout acquiring display\n");
706 /* point at display engine memory area (hash table, objects) */
707 nv_wr32(priv
, 0x610010, (nv_gpuobj(object
->parent
)->addr
>> 8) | 9);
709 /* enable supervisor interrupts, disable everything else */
710 nv_wr32(priv
, 0x610090, 0x00000000);
711 nv_wr32(priv
, 0x6100a0, 0x00000000);
712 nv_wr32(priv
, 0x6100b0, 0x00000307);
714 /* disable underflow reporting, preventing an intermittent issue
715 * on some gk104 boards where the production vbios left this
716 * setting enabled by default.
718 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
720 for (i
= 0; i
< priv
->head
.nr
; i
++)
721 nv_mask(priv
, 0x616308 + (i
* 0x800), 0x00000111, 0x00000010);
727 gf110_disp_main_fini(struct nvkm_object
*object
, bool suspend
)
729 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
730 struct nv50_disp_base
*base
= (void *)object
;
732 /* disable all interrupts */
733 nv_wr32(priv
, 0x6100b0, 0x00000000);
735 return nvkm_parent_fini(&base
->base
, suspend
);
739 gf110_disp_main_ofuncs
= {
740 .ctor
= nv50_disp_main_ctor
,
741 .dtor
= nv50_disp_main_dtor
,
742 .init
= gf110_disp_main_init
,
743 .fini
= gf110_disp_main_fini
,
744 .mthd
= nv50_disp_main_mthd
,
745 .ntfy
= nvkm_disp_ntfy
,
748 static struct nvkm_oclass
749 gf110_disp_main_oclass
[] = {
750 { GF110_DISP
, &gf110_disp_main_ofuncs
},
754 static struct nvkm_oclass
755 gf110_disp_sclass
[] = {
756 { GF110_DISP_CORE_CHANNEL_DMA
, &gf110_disp_core_ofuncs
.base
},
757 { GF110_DISP_BASE_CHANNEL_DMA
, &gf110_disp_base_ofuncs
.base
},
758 { GF110_DISP_OVERLAY_CONTROL_DMA
, &gf110_disp_ovly_ofuncs
.base
},
759 { GF110_DISP_OVERLAY
, &gf110_disp_oimm_ofuncs
.base
},
760 { GF110_DISP_CURSOR
, &gf110_disp_curs_ofuncs
.base
},
764 /*******************************************************************************
765 * Display engine implementation
766 ******************************************************************************/
769 gf110_disp_vblank_init(struct nvkm_event
*event
, int type
, int head
)
771 struct nvkm_disp
*disp
= container_of(event
, typeof(*disp
), vblank
);
772 nv_mask(disp
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000001);
776 gf110_disp_vblank_fini(struct nvkm_event
*event
, int type
, int head
)
778 struct nvkm_disp
*disp
= container_of(event
, typeof(*disp
), vblank
);
779 nv_mask(disp
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000000);
782 const struct nvkm_event_func
783 gf110_disp_vblank_func
= {
784 .ctor
= nvkm_disp_vblank_ctor
,
785 .init
= gf110_disp_vblank_init
,
786 .fini
= gf110_disp_vblank_fini
,
789 static struct nvkm_output
*
790 exec_lookup(struct nv50_disp_priv
*priv
, int head
, int or, u32 ctrl
,
791 u32
*data
, u8
*ver
, u8
*hdr
, u8
*cnt
, u8
*len
,
792 struct nvbios_outp
*info
)
794 struct nvkm_bios
*bios
= nvkm_bios(priv
);
795 struct nvkm_output
*outp
;
799 type
= DCB_OUTPUT_ANALOG
;
803 switch (ctrl
& 0x00000f00) {
804 case 0x00000000: type
= DCB_OUTPUT_LVDS
; mask
= 1; break;
805 case 0x00000100: type
= DCB_OUTPUT_TMDS
; mask
= 1; break;
806 case 0x00000200: type
= DCB_OUTPUT_TMDS
; mask
= 2; break;
807 case 0x00000500: type
= DCB_OUTPUT_TMDS
; mask
= 3; break;
808 case 0x00000800: type
= DCB_OUTPUT_DP
; mask
= 1; break;
809 case 0x00000900: type
= DCB_OUTPUT_DP
; mask
= 2; break;
811 nv_error(priv
, "unknown SOR mc 0x%08x\n", ctrl
);
816 mask
= 0x00c0 & (mask
<< 6);
817 mask
|= 0x0001 << or;
818 mask
|= 0x0100 << head
;
820 list_for_each_entry(outp
, &priv
->base
.outp
, head
) {
821 if ((outp
->info
.hasht
& 0xff) == type
&&
822 (outp
->info
.hashm
& mask
) == mask
) {
823 *data
= nvbios_outp_match(bios
, outp
->info
.hasht
,
825 ver
, hdr
, cnt
, len
, info
);
835 static struct nvkm_output
*
836 exec_script(struct nv50_disp_priv
*priv
, int head
, int id
)
838 struct nvkm_bios
*bios
= nvkm_bios(priv
);
839 struct nvkm_output
*outp
;
840 struct nvbios_outp info
;
841 u8 ver
, hdr
, cnt
, len
;
845 for (or = 0; !(ctrl
& (1 << head
)) && or < 8; or++) {
846 ctrl
= nv_rd32(priv
, 0x640180 + (or * 0x20));
847 if (ctrl
& (1 << head
))
854 outp
= exec_lookup(priv
, head
, or, ctrl
, &data
, &ver
, &hdr
, &cnt
, &len
, &info
);
856 struct nvbios_init init
= {
857 .subdev
= nv_subdev(priv
),
859 .offset
= info
.script
[id
],
871 static struct nvkm_output
*
872 exec_clkcmp(struct nv50_disp_priv
*priv
, int head
, int id
, u32 pclk
, u32
*conf
)
874 struct nvkm_bios
*bios
= nvkm_bios(priv
);
875 struct nvkm_output
*outp
;
876 struct nvbios_outp info1
;
877 struct nvbios_ocfg info2
;
878 u8 ver
, hdr
, cnt
, len
;
882 for (or = 0; !(ctrl
& (1 << head
)) && or < 8; or++) {
883 ctrl
= nv_rd32(priv
, 0x660180 + (or * 0x20));
884 if (ctrl
& (1 << head
))
891 outp
= exec_lookup(priv
, head
, or, ctrl
, &data
, &ver
, &hdr
, &cnt
, &len
, &info1
);
895 switch (outp
->info
.type
) {
896 case DCB_OUTPUT_TMDS
:
897 *conf
= (ctrl
& 0x00000f00) >> 8;
901 case DCB_OUTPUT_LVDS
:
902 *conf
= priv
->sor
.lvdsconf
;
905 *conf
= (ctrl
& 0x00000f00) >> 8;
907 case DCB_OUTPUT_ANALOG
:
913 data
= nvbios_ocfg_match(bios
, data
, *conf
, &ver
, &hdr
, &cnt
, &len
, &info2
);
914 if (data
&& id
< 0xff) {
915 data
= nvbios_oclk_match(bios
, info2
.clkcmp
[id
], pclk
);
917 struct nvbios_init init
= {
918 .subdev
= nv_subdev(priv
),
/* Supervisor 1.0: run VBIOS output script 1 for the head being reconfigured. */
static void
gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 1);
}
940 gf110_disp_intr_unk2_0(struct nv50_disp_priv
*priv
, int head
)
942 struct nvkm_output
*outp
= exec_script(priv
, head
, 2);
944 /* see note in nv50_disp_intr_unk20_0() */
945 if (outp
&& outp
->info
.type
== DCB_OUTPUT_DP
) {
946 struct nvkm_output_dp
*outpdp
= (void *)outp
;
947 struct nvbios_init init
= {
948 .subdev
= nv_subdev(priv
),
949 .bios
= nvkm_bios(priv
),
952 .offset
= outpdp
->info
.script
[4],
957 atomic_set(&outpdp
->lt
.done
, 0);
962 gf110_disp_intr_unk2_1(struct nv50_disp_priv
*priv
, int head
)
964 struct nvkm_devinit
*devinit
= nvkm_devinit(priv
);
965 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
967 devinit
->pll_set(devinit
, PLL_VPLL0
+ head
, pclk
);
968 nv_wr32(priv
, 0x612200 + (head
* 0x800), 0x00000000);
972 gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv
*priv
, int head
,
973 struct dcb_output
*outp
)
975 const int or = ffs(outp
->or) - 1;
976 const u32 ctrl
= nv_rd32(priv
, 0x660200 + (or * 0x020));
977 const u32 conf
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
978 const s32 vactive
= nv_rd32(priv
, 0x660414 + (head
* 0x300)) & 0xffff;
979 const s32 vblanke
= nv_rd32(priv
, 0x66041c + (head
* 0x300)) & 0xffff;
980 const s32 vblanks
= nv_rd32(priv
, 0x660420 + (head
* 0x300)) & 0xffff;
981 const u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
982 const u32 link
= ((ctrl
& 0xf00) == 0x800) ? 0 : 1;
983 const u32 hoff
= (head
* 0x800);
984 const u32 soff
= ( or * 0x800);
985 const u32 loff
= (link
* 0x080) + soff
;
986 const u32 symbol
= 100000;
988 u32 dpctrl
= nv_rd32(priv
, 0x61c10c + loff
);
989 u32 clksor
= nv_rd32(priv
, 0x612300 + soff
);
990 u32 datarate
, link_nr
, link_bw
, bits
;
993 link_nr
= hweight32(dpctrl
& 0x000f0000);
994 link_bw
= (clksor
& 0x007c0000) >> 18;
997 /* symbols/hblank - algorithm taken from comments in tegra driver */
998 value
= vblanke
+ vactive
- vblanks
- 7;
999 value
= value
* link_bw
;
1000 do_div(value
, pclk
);
1001 value
= value
- (3 * !!(dpctrl
& 0x00004000)) - (12 / link_nr
);
1002 nv_mask(priv
, 0x616620 + hoff
, 0x0000ffff, value
);
1004 /* symbols/vblank - algorithm taken from comments in tegra driver */
1005 value
= vblanks
- vblanke
- 25;
1006 value
= value
* link_bw
;
1007 do_div(value
, pclk
);
1008 value
= value
- ((36 / link_nr
) + 3) - 1;
1009 nv_mask(priv
, 0x616624 + hoff
, 0x00ffffff, value
);
1012 if ((conf
& 0x3c0) == 0x180) bits
= 30;
1013 else if ((conf
& 0x3c0) == 0x140) bits
= 24;
1015 datarate
= (pclk
* bits
) / 8;
1019 do_div(ratio
, link_nr
* link_bw
);
1021 value
= (symbol
- ratio
) * TU
;
1023 do_div(value
, symbol
);
1024 do_div(value
, symbol
);
1027 value
|= 0x08000000;
1029 nv_wr32(priv
, 0x616610 + hoff
, value
);
1033 gf110_disp_intr_unk2_2(struct nv50_disp_priv
*priv
, int head
)
1035 struct nvkm_output
*outp
;
1036 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
1037 u32 conf
, addr
, data
;
1039 outp
= exec_clkcmp(priv
, head
, 0xff, pclk
, &conf
);
1043 /* see note in nv50_disp_intr_unk20_2() */
1044 if (outp
->info
.type
== DCB_OUTPUT_DP
) {
1045 u32 sync
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
1046 switch ((sync
& 0x000003c0) >> 6) {
1047 case 6: pclk
= pclk
* 30; break;
1048 case 5: pclk
= pclk
* 24; break;
1055 if (nvkm_output_dp_train(outp
, pclk
, true))
1056 ERR("link not trained before attach\n");
1058 if (priv
->sor
.magic
)
1059 priv
->sor
.magic(outp
);
1062 exec_clkcmp(priv
, head
, 0, pclk
, &conf
);
1064 if (outp
->info
.type
== DCB_OUTPUT_ANALOG
) {
1065 addr
= 0x612280 + (ffs(outp
->info
.or) - 1) * 0x800;
1068 addr
= 0x612300 + (ffs(outp
->info
.or) - 1) * 0x800;
1069 data
= (conf
& 0x0100) ? 0x00000101 : 0x00000000;
1070 switch (outp
->info
.type
) {
1071 case DCB_OUTPUT_TMDS
:
1072 nv_mask(priv
, addr
, 0x007c0000, 0x00280000);
1075 gf110_disp_intr_unk2_2_tu(priv
, head
, &outp
->info
);
1082 nv_mask(priv
, addr
, 0x00000707, data
);
1086 gf110_disp_intr_unk4_0(struct nv50_disp_priv
*priv
, int head
)
1088 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
1091 exec_clkcmp(priv
, head
, 1, pclk
, &conf
);
1095 gf110_disp_intr_supervisor(struct work_struct
*work
)
1097 struct nv50_disp_priv
*priv
=
1098 container_of(work
, struct nv50_disp_priv
, supervisor
);
1099 struct nv50_disp_impl
*impl
= (void *)nv_object(priv
)->oclass
;
1103 nv_debug(priv
, "supervisor %d\n", ffs(priv
->super
));
1104 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1105 mask
[head
] = nv_rd32(priv
, 0x6101d4 + (head
* 0x800));
1106 nv_debug(priv
, "head %d: 0x%08x\n", head
, mask
[head
]);
1109 if (priv
->super
& 0x00000001) {
1110 nv50_disp_mthd_chan(priv
, NV_DBG_DEBUG
, 0, impl
->mthd
.core
);
1111 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1112 if (!(mask
[head
] & 0x00001000))
1114 nv_debug(priv
, "supervisor 1.0 - head %d\n", head
);
1115 gf110_disp_intr_unk1_0(priv
, head
);
1118 if (priv
->super
& 0x00000002) {
1119 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1120 if (!(mask
[head
] & 0x00001000))
1122 nv_debug(priv
, "supervisor 2.0 - head %d\n", head
);
1123 gf110_disp_intr_unk2_0(priv
, head
);
1125 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1126 if (!(mask
[head
] & 0x00010000))
1128 nv_debug(priv
, "supervisor 2.1 - head %d\n", head
);
1129 gf110_disp_intr_unk2_1(priv
, head
);
1131 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1132 if (!(mask
[head
] & 0x00001000))
1134 nv_debug(priv
, "supervisor 2.2 - head %d\n", head
);
1135 gf110_disp_intr_unk2_2(priv
, head
);
1138 if (priv
->super
& 0x00000004) {
1139 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1140 if (!(mask
[head
] & 0x00001000))
1142 nv_debug(priv
, "supervisor 3.0 - head %d\n", head
);
1143 gf110_disp_intr_unk4_0(priv
, head
);
1147 for (head
= 0; head
< priv
->head
.nr
; head
++)
1148 nv_wr32(priv
, 0x6101d4 + (head
* 0x800), 0x00000000);
1149 nv_wr32(priv
, 0x6101d0, 0x80000000);
1153 gf110_disp_intr_error(struct nv50_disp_priv
*priv
, int chid
)
1155 const struct nv50_disp_impl
*impl
= (void *)nv_object(priv
)->oclass
;
1156 u32 mthd
= nv_rd32(priv
, 0x6101f0 + (chid
* 12));
1157 u32 data
= nv_rd32(priv
, 0x6101f4 + (chid
* 12));
1158 u32 unkn
= nv_rd32(priv
, 0x6101f8 + (chid
* 12));
1160 nv_error(priv
, "chid %d mthd 0x%04x data 0x%08x "
1162 chid
, (mthd
& 0x0000ffc), data
, mthd
, unkn
);
1165 switch (mthd
& 0xffc) {
1167 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 0,
1175 switch (mthd
& 0xffc) {
1177 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 1,
1185 switch (mthd
& 0xffc) {
1187 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 5,
1195 nv_wr32(priv
, 0x61009c, (1 << chid
));
1196 nv_wr32(priv
, 0x6101f0 + (chid
* 12), 0x90000000);
1200 gf110_disp_intr(struct nvkm_subdev
*subdev
)
1202 struct nv50_disp_priv
*priv
= (void *)subdev
;
1203 u32 intr
= nv_rd32(priv
, 0x610088);
1206 if (intr
& 0x00000001) {
1207 u32 stat
= nv_rd32(priv
, 0x61008c);
1209 int chid
= __ffs(stat
); stat
&= ~(1 << chid
);
1210 nv50_disp_chan_uevent_send(priv
, chid
);
1211 nv_wr32(priv
, 0x61008c, 1 << chid
);
1213 intr
&= ~0x00000001;
1216 if (intr
& 0x00000002) {
1217 u32 stat
= nv_rd32(priv
, 0x61009c);
1218 int chid
= ffs(stat
) - 1;
1220 gf110_disp_intr_error(priv
, chid
);
1221 intr
&= ~0x00000002;
1224 if (intr
& 0x00100000) {
1225 u32 stat
= nv_rd32(priv
, 0x6100ac);
1226 if (stat
& 0x00000007) {
1227 priv
->super
= (stat
& 0x00000007);
1228 schedule_work(&priv
->supervisor
);
1229 nv_wr32(priv
, 0x6100ac, priv
->super
);
1230 stat
&= ~0x00000007;
1234 nv_info(priv
, "unknown intr24 0x%08x\n", stat
);
1235 nv_wr32(priv
, 0x6100ac, stat
);
1238 intr
&= ~0x00100000;
1241 for (i
= 0; i
< priv
->head
.nr
; i
++) {
1242 u32 mask
= 0x01000000 << i
;
1244 u32 stat
= nv_rd32(priv
, 0x6100bc + (i
* 0x800));
1245 if (stat
& 0x00000001)
1246 nvkm_disp_vblank(&priv
->base
, i
);
1247 nv_mask(priv
, 0x6100bc + (i
* 0x800), 0, 0);
1248 nv_rd32(priv
, 0x6100c0 + (i
* 0x800));
1254 gf110_disp_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
1255 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
1256 struct nvkm_object
**pobject
)
1258 struct nv50_disp_priv
*priv
;
1259 int heads
= nv_rd32(parent
, 0x022448);
1262 ret
= nvkm_disp_create(parent
, engine
, oclass
, heads
,
1263 "PDISP", "display", &priv
);
1264 *pobject
= nv_object(priv
);
1268 ret
= nvkm_event_init(&gf110_disp_chan_uevent
, 1, 17, &priv
->uevent
);
1272 nv_engine(priv
)->sclass
= gf110_disp_main_oclass
;
1273 nv_engine(priv
)->cclass
= &nv50_disp_cclass
;
1274 nv_subdev(priv
)->intr
= gf110_disp_intr
;
1275 INIT_WORK(&priv
->supervisor
, gf110_disp_intr_supervisor
);
1276 priv
->sclass
= gf110_disp_sclass
;
1277 priv
->head
.nr
= heads
;
1280 priv
->dac
.power
= nv50_dac_power
;
1281 priv
->dac
.sense
= nv50_dac_sense
;
1282 priv
->sor
.power
= nv50_sor_power
;
1283 priv
->sor
.hda_eld
= gf110_hda_eld
;
1284 priv
->sor
.hdmi
= gf110_hdmi_ctrl
;
1288 struct nvkm_oclass
*
1289 gf110_disp_outp_sclass
[] = {
1290 &gf110_sor_dp_impl
.base
.base
,
1294 struct nvkm_oclass
*
1295 gf110_disp_oclass
= &(struct nv50_disp_impl
) {
1296 .base
.base
.handle
= NV_ENGINE(DISP
, 0x90),
1297 .base
.base
.ofuncs
= &(struct nvkm_ofuncs
) {
1298 .ctor
= gf110_disp_ctor
,
1299 .dtor
= _nvkm_disp_dtor
,
1300 .init
= _nvkm_disp_init
,
1301 .fini
= _nvkm_disp_fini
,
1303 .base
.vblank
= &gf110_disp_vblank_func
,
1304 .base
.outp
= gf110_disp_outp_sclass
,
1305 .mthd
.core
= &gf110_disp_core_mthd_chan
,
1306 .mthd
.base
= &gf110_disp_base_mthd_chan
,
1307 .mthd
.ovly
= &gf110_disp_ovly_mthd_chan
,
1308 .mthd
.prev
= -0x020000,
1309 .head
.scanoutpos
= gf110_disp_main_scanoutpos
,