/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nv50_ram(p) container_of((p), struct nv50_ram, base)
#include "ram.h"
#include "ramseq.h"
#include "nv50.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk/pll.h>

struct nv50_ramseq {
	struct hwsq base;
	struct hwsq_reg r_0x002504;
	struct hwsq_reg r_0x004008;
	struct hwsq_reg r_0x00400c;
	struct hwsq_reg r_0x00c040;
	struct hwsq_reg r_0x100200;
	struct hwsq_reg r_0x100210;
	struct hwsq_reg r_0x10021c;
	struct hwsq_reg r_0x1002d0;
	struct hwsq_reg r_0x1002d4;
	struct hwsq_reg r_0x1002dc;
	struct hwsq_reg r_0x10053c;
	struct hwsq_reg r_0x1005a0;
	struct hwsq_reg r_0x1005a4;
	struct hwsq_reg r_0x100710;
	struct hwsq_reg r_0x100714;
	struct hwsq_reg r_0x100718;
	struct hwsq_reg r_0x10071c;
	struct hwsq_reg r_0x100da0;
	struct hwsq_reg r_0x100e20;
	struct hwsq_reg r_0x100e24;
	struct hwsq_reg r_0x611200;
	struct hwsq_reg r_timing[9];
	struct hwsq_reg r_mr[4];
};

struct nv50_ram {
	struct nvkm_ram base;
	struct nv50_ramseq hwsq;
};
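
/* Each hwsq_reg above is a handle for one of the MMIO registers touched by
 * the reclocking sequence.  nv50_ram_calc() builds a script against these
 * handles, and ram_exec() later hands that script to the hardware sequencer
 * (HWSQ) for execution.
 */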

#define T(t) cfg->timing_10_##t
static int
nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
{
	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_device *device = subdev->device;
	u32 cur2, cur4, cur7, cur8;
	u8 unkt3b;

	cur2 = nvkm_rd32(device, 0x100228);
	cur4 = nvkm_rd32(device, 0x100230);
	cur7 = nvkm_rd32(device, 0x10023c);
	cur8 = nvkm_rd32(device, 0x100240);

	switch ((!T(CWL)) * ram->base.type) {
	case NVKM_RAM_TYPE_DDR2:
		T(CWL) = T(CL) - 1;
		break;
	case NVKM_RAM_TYPE_GDDR3:
		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
		break;
	}

	/* XXX: N=1 is not proper statistics */
	if (device->chipset == 0xa0) {
		unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
		timing[6] = (0x2d + T(CL) - T(CWL) +
				ram->base.next->bios.rammap_00_16_40) << 16 |
			    max_t(s8, T(CWL) - 4, 1) << 8 |
			    (0x2f + T(CL) - T(CWL));
	} else {
		unkt3b = 0x16;
		timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
			    max_t(s8, T(CWL) - 2, 1) << 8 |
			    (0x2e + T(CL) - T(CWL));
	}

	timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
	timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
		    max_t(u8, T(18), 1) << 16 |
		    (T(WTR) + 1 + T(CWL)) << 8 |
		    (3 + T(CL) - T(CWL));
	timing[2] = (T(CWL) - 1) << 24 |
		    (T(RRD) << 16) |
		    (T(RCDWR) << 8) |
		    T(RCDRD);
	timing[3] = (unkt3b - 2 + T(CL)) << 24 |
		    unkt3b << 16 |
		    (T(CL) - 1) << 8 |
		    (T(CL) - 1);
	timing[4] = (cur4 & 0xffff0000) |
		    T(13) << 8 |
		    T(13);
	timing[5] = T(RFC) << 24 |
		    max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
		    T(RP);
	/* Timing 6 is already done above */
	timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
	timing[8] = (cur8 & 0xffffff00);

	/* XXX: P.version == 1 only has DDR2 and GDDR3? */
	if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
		timing[5] |= (T(CL) + 3) << 8;
		timing[8] |= (T(CL) - 4);
	} else
	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
		timing[5] |= (T(CL) + 2) << 8;
		timing[8] |= (T(CL) - 2);
	}

	nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
		   timing[0], timing[1], timing[2], timing[3]);
	nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
		   timing[4], timing[5], timing[6], timing[7]);
	nvkm_debug(subdev, " 240: %08x\n", timing[8]);
	return 0;
}
#undef T
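
/* Pulsing bit 8 of MR0 requests a DLL reset on (G)DDR parts; this helper is
 * used below whenever the VBIOS ramcfg does not ask for the DLL to be kept
 * off (ramcfg_DLLoff).
 */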
static void
nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
{
	ram_mask(hwsq, mr[0], 0x100, 0x100);
	ram_mask(hwsq, mr[0], 0x100, 0x000);
	ram_nsec(hwsq, 24000);
}

static int
nv50_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct nv50_ram *ram = nv50_ram(base);
	struct nv50_ramseq *hwsq = &ram->hwsq;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_perfE perfE;
	struct nvbios_pll mpll;
	struct nvkm_ram_data *next;
	u8  ver, hdr, cnt, len, strap, size;
	u32 data;
	u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
	int N1, M1, N2, M2, P;
	int ret, i;
	u32 timing[9];

	next = &ram->base.target;
	next->freq = freq;
	ram->base.next = next;
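
	/* The lookup chain below: the perf table entry matched for the target
	 * frequency points at a rammap entry, the ramcfg strap then selects
	 * the board-specific subentry, and that may in turn name a timing
	 * table entry.  All of it is parsed into next->bios before any
	 * register programming is decided.
	 */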

	/* lookup closest matching performance table entry for frequency */
	i = 0;
	do {
		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
				     &size, &perfE);
		if (!data || (ver < 0x25 || ver >= 0x40) ||
		    (size < 2)) {
			nvkm_error(subdev, "invalid/missing perftab entry\n");
			return -EINVAL;
		}
	} while (perfE.memory < freq);

	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(subdev);
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
					 &next->bios);
	if (!data) {
		nvkm_error(subdev, "invalid/missing rammap entry ");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	if (next->bios.ramcfg_timing != 0xff) {
		data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
				       &ver, &hdr, &cnt, &len, &next->bios);
		if (!data || ver != 0x10 || hdr < 0x12) {
			nvkm_error(subdev, "invalid/missing timing entry "
				   "%02x %04x %02x %02x\n",
				   strap, data, ver, hdr);
			return -EINVAL;
		}
	}

	nv50_ram_timing_calc(ram, timing);

	ret = ram_init(hwsq, subdev);
	if (ret)
		return ret;

	/* Determine ram-specific MR values */
	ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
	ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_GDDR3:
		ret = nvkm_gddr3_calc(&ram->base);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return ret;
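
	/* From this point on, ram_wr32()/ram_mask()/ram_nsec() and friends do
	 * not touch the hardware directly; they append entries to the HWSQ
	 * script held in hwsq, which nv50_ram_prog() later submits.
	 */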

	/* Always disable this bit during reclock */
	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);

	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
	ram_wr32(hwsq, 0x611200, 0x00003300);
	ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
	ram_nsec(hwsq, 8000);
	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
	ram_nsec(hwsq, 2000);

	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
	ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
	ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */

	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
	mpll.vco2.max_freq = 0;
	if (ret >= 0) {
		ret = nv04_pll_calc(subdev, &mpll, freq,
				    &N1, &M1, &N2, &M2, &P);
		if (ret <= 0)
			ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* XXX: 750MHz seems rather arbitrary */
	if (freq <= 750000) {
		r100da0 = 0x00000010;
		r004008 = 0x90000000;
	} else {
		r100da0 = 0x00000000;
		r004008 = 0x80000000;
	}

	r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);

	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
	/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
	 * it have a different rammap bit from DLLoff? */
	ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
			next->bios.rammap_00_16_40 << 14);
	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
	if (subdev->device->chipset >= 0x96)
		ram_wr32(hwsq, 0x100da0, r100da0);
	ram_nsec(hwsq, 64000); /*XXX*/
	ram_nsec(hwsq, 32000); /*XXX*/

	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);

	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* disable self-refresh */
	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */

	ram_nsec(hwsq, 12000);

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_DDR2:
		ram_nuke(hwsq, mr[0]); /* force update */
		ram_mask(hwsq, mr[0], 0x000, 0x000);
		break;
	case NVKM_RAM_TYPE_GDDR3:
		ram_nuke(hwsq, mr[1]); /* force update */
		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
		ram_nuke(hwsq, mr[0]); /* force update */
		ram_wr32(hwsq, mr[0], ram->base.mr[0]);
		break;
	default:
		break;
	}
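
	/* ram_nuke() invalidates the shadowed value of a register so that the
	 * next write to it is always emitted into the script even if the
	 * value looks unchanged; hence the "force update" notes above.
	 */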

	ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
	ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
	ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
	ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
	ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
	ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
	ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
	ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
	ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);

	if (!next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
	ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);

	/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
	unk710  = ram_rd32(hwsq, 0x100710) & ~0x00000101;
	unk714  = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
	unk718  = ram_rd32(hwsq, 0x100718) & ~0x00000100;
	unk71c  = ram_rd32(hwsq, 0x10071c) & ~0x00000100;

	if ( next->bios.ramcfg_00_03_01)
		unk71c |= 0x00000100;
	if ( next->bios.ramcfg_00_03_02)
		unk710 |= 0x00000100;
	if (!next->bios.ramcfg_00_03_08) {
		unk710 |= 0x00000001;
		unk714 |= 0x00000020;
	}
	if ( next->bios.ramcfg_00_04_04)
		unk714 |= 0x70000000;
	if ( next->bios.ramcfg_00_04_20)
		unk718 |= 0x00000100;

	ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
	ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
	ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
	ram_mask(hwsq, 0x100710, 0xffffffff, unk710);

	if (next->bios.rammap_00_16_20) {
		ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
					 next->bios.ramcfg_00_06 << 8 |
					 next->bios.ramcfg_00_05);
		ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
					 next->bios.ramcfg_00_08);
		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
	} else {
		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
	}
	ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);

	/* Reset DLL */
	if (!next->bios.ramcfg_DLLoff)
		nvkm_sddr2_dll_reset(hwsq);

	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
	ram_wr32(hwsq, 0x611200, 0x00003330);
	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */

	if (next->bios.rammap_00_17_02)
		ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
	if (!next->bios.rammap_00_16_40)
		ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
	if (next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);

	return 0;
}
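
/* nv50_ram_prog() executes the script assembled by nv50_ram_calc().  The
 * "NvMemExec" config option (default true) can be set to false to build and
 * then discard the sequence without reclocking the hardware.
 */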
static int
nv50_ram_prog(struct nvkm_ram *base)
{
	struct nv50_ram *ram = nv50_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

static void
nv50_ram_tidy(struct nvkm_ram *base)
{
	struct nv50_ram *ram = nv50_ram(base);
	ram_exec(&ram->hwsq, false);
}

void
__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
{
	struct nvkm_mm_node *this;

	while (!list_empty(&mem->regions)) {
		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);

		list_del(&this->rl_entry);
		nvkm_mm_free(&ram->vram, &this);
	}

	nvkm_mm_free(&ram->tags, &mem->tag);
}

void
nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
	struct nvkm_mem *mem = *pmem;

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&ram->fb->subdev.mutex);
	__nv50_ram_put(ram, mem);
	mutex_unlock(&ram->fb->subdev.mutex);

	kfree(mem);
}

int
nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
	     u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_mm *heap = &ram->vram;
	struct nvkm_mm *tags = &ram->tags;
	struct nvkm_mm_node *r;
	struct nvkm_mem *mem;
	int comp = (memtype & 0x300) >> 8;
	int type = (memtype & 0x07f);
	int back = (memtype & 0x800);
	int min, max, ret;

	max = (size >> NVKM_RAM_MM_SHIFT);
	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
	align >>= NVKM_RAM_MM_SHIFT;
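
	/* size, ncmin and align arrive in bytes and are converted above to
	 * the allocator's granularity; NVKM_RAM_MM_SHIFT is the log2 of that
	 * unit (4 KiB, assuming the usual value of 12).
	 */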

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mutex_lock(&ram->fb->subdev.mutex);
	if (comp) {
		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
			int n = (max >> 4) * comp;

			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
			if (ret)
				mem->tag = NULL;
		}

		if (unlikely(!mem->tag))
			comp = 0;
	}

	INIT_LIST_HEAD(&mem->regions);
	mem->memtype = (comp << 7) | type;
	mem->size = max;

	type = nv50_fb_memtype[type];
	do {
		if (back)
			ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
		else
			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
		if (ret) {
			mutex_unlock(&ram->fb->subdev.mutex);
			ram->func->put(ram, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		max -= r->length;
	} while (max);
	mutex_unlock(&ram->fb->subdev.mutex);

	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
	*pmem = mem;
	return 0;
}

static const struct nvkm_ram_func
nv50_ram_func = {
	.get = nv50_ram_get,
	.put = nv50_ram_put,
	.calc = nv50_ram_calc,
	.prog = nv50_ram_prog,
	.tidy = nv50_ram_tidy,
};

static u32
nv50_fb_vram_rblock(struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &ram->fb->subdev;
	struct nvkm_device *device = subdev->device;
	int colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
	u32 r0, r4, rt, rblock_size;

	r0 = nvkm_rd32(device, 0x100200);
	r4 = nvkm_rd32(device, 0x100204);
	rt = nvkm_rd32(device, 0x100250);
	nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
		   r0, r4, rt, nvkm_rd32(device, 0x001540));

	colbits  =  (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

	rowsize = ram->parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	if (predicted != ram->size) {
		nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
			  (u32)(ram->size >> 20));
	}

	rblock_size = rowsize;
	if (rt & 0x00000001)
		rblock_size *= 3;

	nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
	return rblock_size;
}
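
/* The rblock value computed above is handed to nvkm_mm_init() below as the
 * block granularity of the VRAM allocator, presumably so that allocations
 * stay aligned to the row/partition interleave.
 */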
int
nv50_ram_ctor(const struct nvkm_ram_func *func,
	      struct nvkm_fb *fb, struct nvkm_ram *ram)
{
	struct nvkm_device *device = fb->subdev.device;
	struct nvkm_bios *bios = device->bios;
	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
	u64 size = nvkm_rd32(device, 0x10020c);
	u32 tags = nvkm_rd32(device, 0x100320);
	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
	int ret;

	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
	case 0: type = NVKM_RAM_TYPE_DDR1; break;
	case 1:
		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
			type = NVKM_RAM_TYPE_DDR3;
		else
			type = NVKM_RAM_TYPE_DDR2;
		break;
	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
	default:
		break;
	}

	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);

	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
	if (ret)
		return ret;

	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
	ram->parts = hweight8(ram->part_mask);
	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
	nvkm_mm_fini(&ram->vram);

	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}
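
/* nv50_ram_new() allocates the nv50_ram wrapper and pre-creates the hwsq
 * register handles used by the reclock script.  Dual-rank boards use
 * hwsq_reg2(), presumably so that mode-register writes reach both per-rank
 * register copies.
 */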
int
nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nv50_ram *ram;
	int ret, i;

	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
	if (ret)
		return ret;

	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
	ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
	ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
	ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
	ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
	ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
	ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
	ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
	ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
	ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
	ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);

	for (i = 0; i < 9; i++)
		ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));

	if (ram->base.ranks > 1) {
		ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
		ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
		ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
		ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
	} else {
		ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
		ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
		ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
		ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
	}

	return 0;
}