git.proxmox.com Git - mirror_ubuntu-artful-kernel.git
commit: drm/nouveau/disp/nv50-: port OR power state control to nvkm_ior
file: drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "ior.h"
25 #include "nv50.h"
26
27 #include <subdev/timer.h>
28
29 static inline u32
30 g94_sor_soff(struct nvkm_output_dp *outp)
31 {
32 return (ffs(outp->base.info.or) - 1) * 0x800;
33 }
34
35 static inline u32
36 g94_sor_loff(struct nvkm_output_dp *outp)
37 {
38 return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
39 }
40
41 /*******************************************************************************
42 * DisplayPort
43 ******************************************************************************/
44 u32
45 g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
46 {
47 static const u8 gm100[] = { 0, 8, 16, 24 };
48 static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
49 static const u8 g94[] = { 16, 8, 0, 24 };
50 if (device->chipset >= 0x110)
51 return gm100[lane];
52 if (device->chipset == 0xaf)
53 return mcp89[lane];
54 return g94[lane];
55 }
56
/* Program per-lane drive parameters (drive current / pre-emphasis / TX
 * pull-up) for one DP lane, using values from the VBIOS DP tables.
 *
 * ln: logical lane index; vs/pe: requested voltage-swing/pre-emphasis
 * levels; pc is accepted for interface compatibility but unused on this
 * generation (presumably only relevant on later chips — verify against
 * other sor*.c implementations).
 *
 * Returns 0 on success, -ENODEV if the VBIOS has no DP table entry for
 * this output, -EINVAL if no config matches the requested vs/pe levels.
 */
static int
g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	const u32 shift = g94_sor_dp_lane_map(device, ln);
	const u32 loff = g94_sor_loff(outp);
	u32 addr, data[3];
	u8 ver, hdr, cnt, len;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;

	/* Locate this output's entry in the VBIOS DP output table. */
	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
				  outp->base.info.hashm,
				  &ver, &hdr, &cnt, &len, &info);
	if (!addr)
		return -ENODEV;

	/* Look up the drive config for the requested vs/pe combination. */
	addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
				  &ver, &hdr, &cnt, &len, &ocfg);
	if (!addr)
		return -EINVAL;

	/* Read-modify-write: clear this lane's byte in the drive-current
	 * and pre-emphasis registers before OR-ing in the new values. */
	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	/* TX pull-up is shared across lanes: only raise it (or force it on
	 * lane 0) so the strongest requested value wins. */
	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);
	return 0;
}
90
91 static int
92 g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
93 {
94 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
95 const u32 loff = g94_sor_loff(outp);
96 nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
97 return 0;
98 }
99
/* Power up the first 'nr' DP lanes on this output.
 *
 * The logical lane indices are translated through the per-chipset lane
 * routing before being folded into the 4-bit lane-enable field.  After
 * writing the mask, an update is triggered and we poll (up to 2000ms via
 * nvkm_msec) for the hardware to clear the busy bit.
 *
 * Always returns 0, even on poll timeout — presumably a timeout here is
 * non-fatal; verify against callers if stricter handling is wanted.
 */
int
g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
	const u32 soff = g94_sor_soff(outp);
	const u32 loff = g94_sor_loff(outp);
	u32 mask = 0, i;

	/* Each mapped lane shift is a multiple of 8; >> 3 converts it to
	 * a bit position in the 4-bit enable mask. */
	for (i = 0; i < nr; i++)
		mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);

	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
	/* Kick the update, then wait for hardware to acknowledge. */
	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
			break;
	);
	return 0;
}
119
120 static int
121 g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
122 {
123 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
124 const u32 soff = g94_sor_soff(outp);
125 const u32 loff = g94_sor_loff(outp);
126 u32 dpctrl = 0x00000000;
127 u32 clksor = 0x00000000;
128
129 dpctrl |= ((1 << nr) - 1) << 16;
130 if (ef)
131 dpctrl |= 0x00004000;
132 if (bw > 0x06)
133 clksor |= 0x00040000;
134
135 nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
136 nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
137 return 0;
138 }
139
/* DisplayPort hook table for G94-generation SORs. */
static const struct nvkm_output_dp_func
g94_sor_dp_func = {
	.pattern = g94_sor_dp_pattern,	/* select training pattern */
	.lnk_pwr = g94_sor_dp_lnk_pwr,	/* power up link lanes */
	.lnk_ctl = g94_sor_dp_lnk_ctl,	/* lane count / bandwidth / framing */
	.drv_ctl = g94_sor_dp_drv_ctl,	/* per-lane drive parameters */
};
147
/* Constructor: create a DP output object bound to the G94 hook table.
 * Return value and ownership semantics are those of nvkm_output_dp_new_().
 */
int
g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	       struct nvkm_output **poutp)
{
	return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
}
154
155 static bool
156 nv50_disp_dptmds_war(struct nvkm_device *device)
157 {
158 switch (device->chipset) {
159 case 0x94:
160 case 0x96:
161 case 0x98:
162 return true;
163 default:
164 break;
165 }
166 return false;
167 }
168
169 static bool
170 nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
171 {
172 struct nvkm_device *device = disp->base.engine.subdev.device;
173 const u32 soff = __ffs(outp->or) * 0x800;
174 if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
175 switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
176 case 0x00000000:
177 case 0x00030000:
178 return true;
179 default:
180 break;
181 }
182 }
183 return false;
184
185 }
186
187 void
188 nv50_disp_update_sppll1(struct nv50_disp *disp)
189 {
190 struct nvkm_device *device = disp->base.engine.subdev.device;
191 bool used = false;
192 int sor;
193
194 if (!nv50_disp_dptmds_war(device))
195 return;
196
197 for (sor = 0; sor < disp->func->sor.nr; sor++) {
198 u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
199 switch (clksor & 0x03000000) {
200 case 0x02000000:
201 case 0x03000000:
202 used = true;
203 break;
204 default:
205 break;
206 }
207 }
208
209 if (used)
210 return;
211
212 nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
213 }
214
/* Third stage of the G94/G96/G98 DP/TMDS workaround: if the SOR is
 * powered, step it down through its power-down sequencer state, clear
 * the DP enable and clock-source bits left over from stage 2, then
 * power the SOR back up.  No-op when the workaround doesn't apply to
 * this output (see nv50_disp_dptmds_war_needed()). */
void
nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 soff = __ffs(outp->or) * 0x800;
	u32 sorpwr;

	if (!nv50_disp_dptmds_war_needed(disp, outp))
		return;

	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		/* Extract the power-down / power-up sequencer program
		 * indices from the sequencer control register. */
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pd_pc = (seqctl & 0x00000f00) >> 8;
		u32 pu_pc = seqctl & 0x0000000f;

		/* Patch the power-down program entry. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		/* Wait (up to 2000ms) for the sequencer to go idle, drop
		 * the SOR power-enable bit, and wait for idle again. */
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		/* Restore the sequencer program entries. */
		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	/* Clear the DP-enable bit and the clock-source selection that
	 * stage 2 of the workaround forced on. */
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	if (sorpwr & 0x00000001) {
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
	}
}
254
/* Second stage of the G94/G96/G98 DP/TMDS workaround: enable SPPLL1,
 * temporarily force the SOR into a DP clock configuration, pulse a PLL
 * setting with a 400us settle delay, and if the SOR is powered, patch
 * its power-up sequencer program entry.  Stage 3 later undoes the
 * forced DP/clock bits.  No-op when the workaround doesn't apply. */
void
nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 soff = __ffs(outp->or) * 0x800;

	if (!nv50_disp_dptmds_war_needed(disp, outp))
		return;

	/* Power up SPPLL1 and route the SOR's clock / DP enable bits. */
	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	/* Pulse the clock configuration with a 400us settle delay in
	 * between — presumably required for the PLL to relock; the exact
	 * hardware rationale is undocumented here. */
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	/* If the SOR is powered, patch its power-up sequencer entry. */
	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}
280
281 void
282 g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
283 {
284 struct nvkm_device *device = sor->disp->engine.subdev.device;
285 const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
286 u32 ctrl = nvkm_rd32(device, 0x610794 + coff);
287
288 state->proto_evo = (ctrl & 0x00000f00) >> 8;
289 switch (state->proto_evo) {
290 case 0: state->proto = LVDS; state->link = 1; break;
291 case 1: state->proto = TMDS; state->link = 1; break;
292 case 2: state->proto = TMDS; state->link = 2; break;
293 case 5: state->proto = TMDS; state->link = 3; break;
294 case 8: state->proto = DP; state->link = 1; break;
295 case 9: state->proto = DP; state->link = 2; break;
296 default:
297 state->proto = UNKNOWN;
298 break;
299 }
300
301 state->head = ctrl & 0x00000003;
302 }
303
/* IOR hook table for G94 SORs: own state readback, NV50 power control. */
static const struct nvkm_ior_func
g94_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
};
309
/* Constructor: register SOR 'id' with the G94 IOR hook table.  Return
 * value semantics are those of nvkm_ior_new_(). */
int
g94_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&g94_sor, disp, SOR, id);
}