/* drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gf100_ram(p) container_of((p), struct gf100_ram, base)
#include "ram.h"
#include "ramfuc.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/ltc.h>

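/* Handles for each register touched by the reclocking sequence below.
 * ramfuc_reg() only records an offset; the ram_wr32()/ram_mask() helpers
 * (see ramfuc.h) use these handles to build up a script of register writes
 * that is executed later, by gf100_ram_prog(), rather than being applied
 * immediately.
 */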
struct gf100_ramfuc {
	struct ramfuc base;

	struct ramfuc_reg r_0x10fe20;
	struct ramfuc_reg r_0x10fe24;
	struct ramfuc_reg r_0x137320;
	struct ramfuc_reg r_0x137330;

	struct ramfuc_reg r_0x132000;
	struct ramfuc_reg r_0x132004;
	struct ramfuc_reg r_0x132100;

	struct ramfuc_reg r_0x137390;

	struct ramfuc_reg r_0x10f290;
	struct ramfuc_reg r_0x10f294;
	struct ramfuc_reg r_0x10f298;
	struct ramfuc_reg r_0x10f29c;
	struct ramfuc_reg r_0x10f2a0;

	struct ramfuc_reg r_0x10f300;
	struct ramfuc_reg r_0x10f338;
	struct ramfuc_reg r_0x10f340;
	struct ramfuc_reg r_0x10f344;
	struct ramfuc_reg r_0x10f348;

	struct ramfuc_reg r_0x10f910;
	struct ramfuc_reg r_0x10f914;

	struct ramfuc_reg r_0x100b0c;
	struct ramfuc_reg r_0x10f050;
	struct ramfuc_reg r_0x10f090;
	struct ramfuc_reg r_0x10f200;
	struct ramfuc_reg r_0x10f210;
	struct ramfuc_reg r_0x10f310;
	struct ramfuc_reg r_0x10f314;
	struct ramfuc_reg r_0x10f610;
	struct ramfuc_reg r_0x10f614;
	struct ramfuc_reg r_0x10f800;
	struct ramfuc_reg r_0x10f808;
	struct ramfuc_reg r_0x10f824;
	struct ramfuc_reg r_0x10f830;
	struct ramfuc_reg r_0x10f988;
	struct ramfuc_reg r_0x10f98c;
	struct ramfuc_reg r_0x10f990;
	struct ramfuc_reg r_0x10f998;
	struct ramfuc_reg r_0x10f9b0;
	struct ramfuc_reg r_0x10f9b4;
	struct ramfuc_reg r_0x10fb04;
	struct ramfuc_reg r_0x10fb08;
	struct ramfuc_reg r_0x137300;
	struct ramfuc_reg r_0x137310;
	struct ramfuc_reg r_0x137360;
	struct ramfuc_reg r_0x1373ec;
	struct ramfuc_reg r_0x1373f0;
	struct ramfuc_reg r_0x1373f8;

	struct ramfuc_reg r_0x61c140;
	struct ramfuc_reg r_0x611200;

	struct ramfuc_reg r_0x13d8f4;
};

struct gf100_ram {
	struct nvkm_ram base;
	struct gf100_ramfuc fuc;
	struct nvbios_pll refpll;
	struct nvbios_pll mempll;
};

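/* Kick off a link-training command: the magic value is written to 0x10f910
 * and 0x10f914, and, when bit 31 of the command is set, the script then
 * waits for the low status bits of 0x110974 + N*0x1000 to clear on every
 * partition that is not masked off in 0x022554 (0x022438 is read as the
 * partition count, as elsewhere in this file).
 */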
static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
	struct nvkm_fb *fb = ram->base.fb;
	struct nvkm_device *device = fb->subdev.device;
	u32 part = nvkm_rd32(device, 0x022438), i;
	u32 mask = nvkm_rd32(device, 0x022554);
	u32 addr = 0x110974;

	ram_wr32(fuc, 0x10f910, magic);
	ram_wr32(fuc, 0x10f914, magic);

	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
		if (mask & (1 << i))
			continue;
		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
	}
}

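/* Build (but do not execute) the register sequence needed to reclock memory
 * to the requested frequency: look up the rammap/ramcfg/timing entries in
 * the VBIOS, decide whether the target matches what a divider from the
 * active SPPLL can produce (mode 0) or whether the memory PLL has to be
 * reprogrammed (mode 1), and record the corresponding write sequence into
 * the ramfuc script.  The script is executed by gf100_ram_prog(), or
 * discarded by gf100_ram_tidy().
 */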
int
gf100_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct gf100_ram *ram = gf100_ram(base);
	struct gf100_ramfuc *fuc = &ram->fuc;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_bios *bios = device->bios;
	struct nvbios_ramcfg cfg;
	u8 ver, cnt, len, strap;
	struct {
		u32 data;
		u8 size;
	} rammap, ramcfg, timing;
	int ref, div, out;
	int from, mode;
	int N1, M1, P;
	int ret;

	/* lookup memory config data relevant to the target frequency */
	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
				      &cnt, &ramcfg.size, &cfg);
	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing rammap entry\n");
		return -EINVAL;
	}

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(subdev);
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	strap = nvbios_rd08(bios, ramcfg.data + 0x01);
	if (strap != 0xff) {
		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
					      &cnt, &len);
		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
			nvkm_error(subdev, "invalid/missing timing entry\n");
			return -EINVAL;
		}
	} else {
		timing.data = 0;
	}

	ret = ram_init(fuc, ram->base.fb);
	if (ret)
		return ret;

	/* determine current mclk configuration */
	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */

	/* determine target mclk configuration */
	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
		ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
	else
		ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
	out = (ref * 2) / (div + 2);
	mode = freq != out;

	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);

	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
		ram_nuke(fuc, 0x132000);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
	}

	if (mode == 1) {
		ram_nuke(fuc, 0x10fe20);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
	}

	// 0x00020034 // 0x0000000a
	ram_wr32(fuc, 0x132100, 0x00000001);

	if (mode == 1 && from == 0) {
		/* calculate refpll */
		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc refpll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010000);
		ram_wr32(fuc, 0x137320, 0x00000003);
		ram_wr32(fuc, 0x137330, 0x81200006);
		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x10fe20, 0x20010001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);

		/* calculate mempll */
		ret = gt215_pll_calc(subdev, &ram->mempll, freq,
				     &N1, NULL, &M1, &P);
		if (ret <= 0) {
			nvkm_error(subdev, "unable to calc mempll\n");
			return ret ? ret : -ERANGE;
		}

		ram_wr32(fuc, 0x10fe20, 0x20010005);
		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
		ram_wr32(fuc, 0x132000, 0x18010101);
		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
	} else
	if (mode == 0) {
		ram_wr32(fuc, 0x137300, 0x00000003);
	}

	if (from == 0) {
		ram_nuke(fuc, 0x10fb04);
		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
		ram_nuke(fuc, 0x10fb08);
		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
		ram_wr32(fuc, 0x10f988, 0x2004ff00);
		ram_wr32(fuc, 0x10f98c, 0x003fc040);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00011a00);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
	} else {
		ram_wr32(fuc, 0x10f988, 0x20010000);
		ram_wr32(fuc, 0x10f98c, 0x00000000);
		ram_wr32(fuc, 0x10f990, 0x20012001);
		ram_wr32(fuc, 0x10f998, 0x00010a00);
	}

	if (from == 0) {
		// 0x00020039 // 0x000000ba
	}

	// 0x0002003a // 0x00000002
	ram_wr32(fuc, 0x100b0c, 0x00080012);
	// 0x00030014 // 0x00000000 // 0x02b5f070
	// 0x00030014 // 0x00010000 // 0x02b5f070
	ram_wr32(fuc, 0x611200, 0x00003300);
	// 0x00020034 // 0x0000000a
	// 0x00030020 // 0x00000001 // 0x00000000

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	ram_wr32(fuc, 0x10f210, 0x00000000);
	ram_nsec(fuc, 1000);
	if (mode == 0)
		gf100_ram_train(fuc, 0x000c1001);
	ram_wr32(fuc, 0x10f310, 0x00000001);
	ram_nsec(fuc, 1000);
	ram_wr32(fuc, 0x10f090, 0x00000061);
	ram_wr32(fuc, 0x10f090, 0xc000007f);
	ram_nsec(fuc, 1000);

	if (from == 0) {
		ram_wr32(fuc, 0x10f824, 0x00007fd4);
	} else {
		ram_wr32(fuc, 0x1373ec, 0x00020404);
	}

	if (mode == 0) {
		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
		ram_wr32(fuc, 0x10f830, 0x41500010);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x137310, 0x81201616);
		ram_wr32(fuc, 0x132100, 0x00000001);
		// 0x00020039 // 0x000000ba
		ram_wr32(fuc, 0x10f830, 0x00300017);
		ram_wr32(fuc, 0x1373f0, 0x00000001);
		ram_wr32(fuc, 0x10f824, 0x00007e77);
		ram_wr32(fuc, 0x132000, 0x18030001);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300220);
		ram_wr32(fuc, 0x10f300, 0x0000011d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x02060505);
		ram_wr32(fuc, 0x10f294, 0x34208288);
		ram_wr32(fuc, 0x10f298, 0x44050411);
		ram_wr32(fuc, 0x10f29c, 0x0000114c);
		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
		ram_wr32(fuc, 0x10f614, 0x40044f77);
		ram_wr32(fuc, 0x10f610, 0x40044f77);
		ram_wr32(fuc, 0x10f344, 0x00600009);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700008);
		ram_wr32(fuc, 0x61c140, 0x19240000);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		gf100_ram_train(fuc, 0x80021001);
		gf100_ram_train(fuc, 0x80081001);
		ram_wr32(fuc, 0x10f340, 0x00500004);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f830, 0x01300017);
		ram_wr32(fuc, 0x10f830, 0x00300017);
		// 0x00030020 // 0x00000000 // 0x00000000
		// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
	} else {
		ram_wr32(fuc, 0x10f800, 0x00001800);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373ec, 0x00020404);
		ram_wr32(fuc, 0x1373f0, 0x00000003);
		ram_wr32(fuc, 0x10f830, 0x40700010);
		ram_wr32(fuc, 0x10f830, 0x40500010);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x1373f8, 0x00000000);
		ram_wr32(fuc, 0x132100, 0x00000101);
		ram_wr32(fuc, 0x137310, 0x89201616);
		ram_wr32(fuc, 0x10f050, 0xff000090);
		ram_wr32(fuc, 0x1373ec, 0x00030404);
		ram_wr32(fuc, 0x1373f0, 0x00000002);
		// 0x00020039 // 0x00000011
		ram_wr32(fuc, 0x132100, 0x00000001);
		ram_wr32(fuc, 0x1373f8, 0x00002000);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
		ram_wr32(fuc, 0x10f830, 0x00500010);
		ram_wr32(fuc, 0x10f200, 0x00ce1000);
		ram_wr32(fuc, 0x10f090, 0x4000007e);
		ram_nsec(fuc, 2000);
		ram_wr32(fuc, 0x10f314, 0x00000001);
		ram_wr32(fuc, 0x10f210, 0x80000000);
		ram_wr32(fuc, 0x10f338, 0x00300200);
		ram_wr32(fuc, 0x10f300, 0x0000084d);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f290, 0x0b343825);
		ram_wr32(fuc, 0x10f294, 0x3483028e);
		ram_wr32(fuc, 0x10f298, 0x440c0600);
		ram_wr32(fuc, 0x10f29c, 0x0000214c);
		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
		ram_wr32(fuc, 0x10f200, 0x00ce0000);
		ram_wr32(fuc, 0x10f614, 0x60044e77);
		ram_wr32(fuc, 0x10f610, 0x60044e77);
		ram_wr32(fuc, 0x10f340, 0x00500000);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f344, 0x00600228);
		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f348, 0x00700000);
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x61c140, 0x09a40000);

		gf100_ram_train(fuc, 0x800e1008);

		ram_nsec(fuc, 1000);
		ram_wr32(fuc, 0x10f800, 0x00001804);
		// 0x00030020 // 0x00000000 // 0x00000000
		// 0x00020034 // 0x0000000b
		ram_wr32(fuc, 0x13d8f4, 0x00000000);
		ram_wr32(fuc, 0x100b0c, 0x00080028);
		ram_wr32(fuc, 0x611200, 0x00003330);
		ram_nsec(fuc, 100000);
		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
		ram_wr32(fuc, 0x10f9b4, 0x00002f50);

		gf100_ram_train(fuc, 0x010c1001);
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
	// 0x00020016 // 0x00000000

	if (mode == 0)
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);

	return 0;
}

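/* Execute the script built by gf100_ram_calc().  Setting the "NvMemExec"
 * config option to false turns this into a dry run, where the script is
 * built and then thrown away without touching the memory controller.
 */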
int
gf100_ram_prog(struct nvkm_ram *base)
{
	struct gf100_ram *ram = gf100_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

void
gf100_ram_tidy(struct nvkm_ram *base)
{
	struct gf100_ram *ram = gf100_ram(base);
	ram_exec(&ram->fuc, false);
}

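/* Free a VRAM allocation: release any compression tags back to the LTC and
 * return the pages themselves via the nv50 helper.
 */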
void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
	struct nvkm_mem *mem = *pmem;

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&ram->fb->subdev.mutex);
	if (mem->tag)
		nvkm_ltc_tags_free(ltc, &mem->tag);
	__nv50_ram_put(ram, mem);
	mutex_unlock(&ram->fb->subdev.mutex);

	kfree(mem);
}

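/* Allocate VRAM.  Sizes are converted to NVKM_RAM_MM_SHIFT-sized units, and
 * when the requested storage type supports compression the allocation also
 * gets LTC compression tags, but only if it is aligned to large pages (the
 * 1 << 17 byte check below).  If no tags can be obtained, the type silently
 * falls back to its uncompressed equivalent from
 * gf100_pte_storage_type_map[].
 */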
int
gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_device *device = ram->fb->subdev.device;
	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
	struct nvkm_mm *mm = &ram->vram;
	struct nvkm_mm_node **node, *r;
	struct nvkm_mem *mem;
	int type = (memtype & 0x0ff);
	int back = (memtype & 0x800);
	const bool comp = gf100_pte_storage_type_map[type] != type;
	int ret;

	size >>= NVKM_RAM_MM_SHIFT;
	align >>= NVKM_RAM_MM_SHIFT;
	ncmin >>= NVKM_RAM_MM_SHIFT;
	if (!ncmin)
		ncmin = size;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->size = size;

	mutex_lock(&ram->fb->subdev.mutex);
	if (comp) {
		/* compression only works with lpages */
		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
			int n = size >> 5;
			if (!nvkm_ltc_tags_alloc(ltc, n, &mem->tag)) {
				nvkm_ltc_tags_clear(device, mem->tag->offset,
						    mem->tag->length);
			}
		}

		if (unlikely(!mem->tag))
			type = gf100_pte_storage_type_map[type];
	}
	mem->memtype = type;

	node = &mem->mem;
	do {
		if (back)
			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
		else
			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&ram->fb->subdev.mutex);
			ram->func->put(ram, &mem);
			return ret;
		}

		*node = r;
		node = &r->next;
		size -= r->length;
	} while (size);
	mutex_unlock(&ram->fb->subdev.mutex);

	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
	*pmem = mem;
	return 0;
}

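/* One-time setup: for GDDR5, the 0x30 pattern slots used by link training
 * are filled by cycling through the twelve train0/train1 patterns; other
 * memory types need no preparation here.
 */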
int
gf100_ram_init(struct nvkm_ram *base)
{
	static const u8 train0[] = {
		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
	};
	static const u32 train1[] = {
		0x00000000, 0xffffffff,
		0x55555555, 0xaaaaaaaa,
		0x33333333, 0xcccccccc,
		0xf0f0f0f0, 0x0f0f0f0f,
		0x00ff00ff, 0xff00ff00,
		0x0000ffff, 0xffff0000,
	};
	struct gf100_ram *ram = gf100_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	int i;

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_GDDR5:
		break;
	default:
		return 0;
	}

	/* prepare for ddr link training, and load training patterns */
	for (i = 0; i < 0x30; i++) {
		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
		nvkm_wr32(device, 0x10f918, train1[i % 12]);
		nvkm_wr32(device, 0x10f91c, train1[i % 12]);
		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
		nvkm_wr32(device, 0x10f918, train1[i % 12]);
		nvkm_wr32(device, 0x10f91c, train1[i % 12]);
	}

	return 0;
}

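/* Per-FBP(A) memory probing.  Each active FBPA reports its memory amount in
 * 0x11020c + N*0x1000 (treated as MiB by gf100_ram_ctor() below); FBPs that
 * are floorswept in the 0x022554 mask contribute nothing, and this code
 * assumes a single LTC per active FBP.
 */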
u32
gf100_ram_probe_fbpa_amount(struct nvkm_device *device, int fbpa)
{
	return nvkm_rd32(device, 0x11020c + (fbpa * 0x1000));
}

u32
gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
			   struct nvkm_device *device, int fbp, int *pltcs)
{
	if (!(fbpao & BIT(fbp))) {
		*pltcs = 1;
		return func->probe_fbpa_amount(device, fbp);
	}
	return 0;
}

u32
gf100_ram_probe_fbp(const struct nvkm_ram_func *func,
		    struct nvkm_device *device, int fbp, int *pltcs)
{
	u32 fbpao = nvkm_rd32(device, 0x022554);
	return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
}

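/* Shared constructor: probe the amount of memory behind each FBP, then set
 * up the VRAM mm as either a single range (all FBPs equal) or the split
 * "mixed memory" layout described below when they differ.
 */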
int
gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	       struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
	u32 fbps = nvkm_rd32(device, 0x022438);
	u64 total = 0, lcomm = ~0, lower, ubase, usize;
	int ret, fbp, ltcs, ltcn = 0;

	nvkm_debug(subdev, "%d FBP(s)\n", fbps);
	for (fbp = 0; fbp < fbps; fbp++) {
		u32 size = func->probe_fbp(func, device, fbp, &ltcs);
		if (size) {
			nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
				   fbp, size, ltcs);
			lcomm = min(lcomm, (u64)(size / ltcs) << 20);
			total += (u64) size << 20;
			ltcn += ltcs;
		} else {
			nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
		}
	}

	lower = lcomm * ltcn;
	ubase = lcomm + func->upper;
	usize = total - lower;

	nvkm_debug(subdev, "Lower: %4lld MiB @ %010llx\n", lower >> 20, 0ULL);
	nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
	nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);

	ret = nvkm_ram_ctor(func, fb, type, total, ram);
	if (ret)
		return ret;

	nvkm_mm_fini(&ram->vram);

	/* Some GPUs are in what's known as a "mixed memory" configuration.
	 *
	 * This is either where some FBPs have more memory than the others,
	 * or where LTCs have been disabled on a FBP.
	 */
	if (lower != total) {
		/* The common memory amount is addressed normally. */
		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
				   rsvd_head >> NVKM_RAM_MM_SHIFT,
				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;

		/* And the rest is much higher in the physical address
		 * space, and may not be usable for certain operations.
		 */
		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
				   ubase >> NVKM_RAM_MM_SHIFT,
				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	} else {
		/* GPUs without mixed-memory are a lot nicer... */
		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
				   rsvd_head >> NVKM_RAM_MM_SHIFT,
				   (total - rsvd_head - rsvd_tail) >>
				   NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	}

	return 0;
}

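/* Default constructor: parse the reference PLL (VBIOS PLL type 0x0c) and
 * memory PLL (type 0x04) limits needed by gf100_ram_calc(), and bind every
 * register handle used by the reclocking script.
 */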
int
gf100_ram_new_(const struct nvkm_ram_func *func,
	       struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct gf100_ram *ram;
	int ret;

	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = gf100_ram_ctor(func, fb, &ram->base);
	if (ret)
		return ret;

	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
	if (ret) {
		nvkm_error(subdev, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
	if (ret) {
		nvkm_error(subdev, "mclk pll data not found\n");
		return ret;
	}

	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);

	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);

	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);

	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);

	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);

	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);

	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);

	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
	return 0;
}

static const struct nvkm_ram_func
gf100_ram = {
	.upper = 0x0200000000,
	.probe_fbp = gf100_ram_probe_fbp,
	.probe_fbp_amount = gf100_ram_probe_fbp_amount,
	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
	.init = gf100_ram_init,
	.get = gf100_ram_get,
	.put = gf100_ram_put,
	.calc = gf100_ram_calc,
	.prog = gf100_ram_prog,
	.tidy = gf100_ram_tidy,
};

int
gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	return gf100_ram_new_(&gf100_ram, fb, pram);
}