1 /************************************************************************
3 * Copyright (c) 2013-2015 Intel Corporation.
5 * SPDX-License-Identifier: BSD-2-Clause-Patent
7 * This file contains all of the Cat Mountain Memory Reference Code (MRC).
9 * These functions are generic and should work for any Cat Mountain config.
11 * MRC requires two data structures to be passed in which are initialised by "PreMemInit()".
13 * The basic flow is as follows:
14 * 01) Check for supported DDR speed configuration
15 * 02) Set up MEMORY_MANAGER buffer as pass-through (POR)
16 * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive setting possible
17 * 04) Set up the MCU logic
18 * 05) Set up the DDR_PHY logic
19 * 06) Initialise the DRAMs (JEDEC)
20 * 07) Perform the Receive Enable Calibration algorithm
21 * 08) Perform the Write Leveling algorithm
22 * 09) Perform the Read Training algorithm (includes internal Vref)
23 * 10) Perform the Write Training algorithm
24 * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings
26 * Dunit configuration based on Valleyview MRC.
28 ***************************************************************************/
31 #include "memory_options.h"
34 #include "meminit_utils.h"
38 // Override ODT to off state if requested
// NOTE(review): this macro silently references the local variable
// `mrc_params` at its expansion site (rd_odt_value == 0 disables read ODT
// by setting BIT12 in DRMC). Callers must have `mrc_params` in scope.
39 #define DRMC_DEFAULT (mrc_params->rd_odt_value==0?BIT12:0)
// Static lookup tables used by the MRC timing code below.
// NOTE(review): this chunk is line-mangled and the initializer value lists
// for every table are missing from the visible text — only the declarations
// survive. Comments added; code left byte-identical.
42 // tRFC values (in picoseconds) per density
// Indexed by DRAM density code (5 entries); consumed as picoseconds.
43 const uint32_t tRFC
[5] =
52 // tCK clock period in picoseconds per speed index 800, 1066, 1333
// Indexed by mrc_params->ddr_speed (see prog_ddr_timing_control).
53 const uint32_t tCK
[3] =
61 // Select static timings specific to simulation environment
64 // Select static timings specific to ClantonPeek platform
// Per-platform static delay tables, indexed by PLATFORM_ID where used
// (write clock / control / command, receive-enable, write/read DQS, write DQ).
70 const uint16_t ddr_wclk
[] =
73 const uint16_t ddr_wctl
[] =
76 const uint16_t ddr_wcmd
[] =
81 const uint16_t ddr_rcvn
[] =
86 const uint16_t ddr_wdqs
[] =
91 const uint8_t ddr_rdqs
[] =
96 const uint16_t ddr_wdq
[] =
102 // Select MEMORY_MANAGER as the source for PRI interface
// select_memory_manager: read-modify-write the MCU DCO register so that
// the PRI interface is owned by the MEMORY_MANAGER (DCO.PMICTL = 0).
// Parameter: mrc_params - MRC configuration/state (unused in the visible body).
// NOTE(review): the source chunk is line-mangled and omits interleaved lines
// (the `Dco` register-union declaration and the function braces);
// code below left byte-identical.
103 static void select_memory_manager(
104 MRCParams_t
*mrc_params
)
110 Dco
.raw
= isbR32m(MCU
, DCO
);
111 Dco
.field
.PMICTL
= 0; //0 - PRI owned by MEMORY_MANAGER
112 isbW32m(MCU
, DCO
, Dco
.raw
);
117 // Select HTE as the source for PRI interface
// select_hte: mirror of select_memory_manager — read-modify-write the MCU
// DCO register so the PRI interface is owned by the HTE (DCO.PMICTL = 1).
// NOTE(review): this chunk is line-mangled; the function's signature line
// (`static void select_hte(` per the comment above) and the `Dco`
// declaration/braces are missing from the visible text. Comments only.
119 MRCParams_t
*mrc_params
)
125 Dco
.raw
= isbR32m(MCU
, DCO
);
126 Dco
.field
.PMICTL
= 1; //1 - PRI owned by HTE
127 isbW32m(MCU
, DCO
, Dco
.raw
);
132 // Send DRAM command, data should be formatted
133 // using DCMD_Xxxx macro or emrsXCommand structure.
// NOTE(review): only the signature survives in this chunk — the parameter
// list and body of dram_init_command are missing from the visible text.
134 static void dram_init_command(
140 // Send DRAM wake command using special MCU side-band WAKE opcode
// dram_wake_command: issue a side-band WAKE packet to the MCU by writing
// SB_COMMAND(SB_WAKE_CMND_OPCODE, MCU, 0) to the PCI side-band packet
// register at bus 0 / device 0 / function 0.
// NOTE(review): chunk is line-mangled; parameter list and braces missing.
141 static void dram_wake_command(
146 Wr32(MMIO
, PCIADDR(0,0,0,SB_PACKET_REG
),
147 (uint32_t) SB_COMMAND(SB_WAKE_CMND_OPCODE
, MCU
, 0));
152 // Stop self refresh driven by MCU
// clear_self_refresh: clear the channel self-refresh status by writing
// BIT0 (with BIT0 mask) into the MCU PMSTS register.
// Parameter: mrc_params - MRC configuration/state (unused in visible body).
// NOTE(review): chunk is line-mangled; function braces are missing.
153 static void clear_self_refresh(
154 MRCParams_t
*mrc_params
)
158 // clear the PMSTS Channel Self Refresh bits
159 isbM32m(MCU
, PMSTS
, BIT0
, BIT0
);
164 // Configure MCU before jedec init sequence
// prog_decode_before_jedec: quiesce the MCU ahead of JEDEC DRAM init —
// disables power-saving (DPMC0), out-of-order scheduling (DSCH),
// auto-refresh (DRFC.tREFI = 0) and short ZQ calibration (DCAL), then
// programs rank population (DRP) from mrc_params->rank_enables.
// NOTE(review): chunk is line-mangled; the register-union declarations
// (Dpmc0, Dsch, Drfc, Dcal, Drp) and braces are missing. Comments only.
165 static void prog_decode_before_jedec(
166 MRCParams_t
*mrc_params
)
176 // Disable power saving features
177 Dpmc0
.raw
= isbR32m(MCU
, DPMC0
);
178 Dpmc0
.field
.CLKGTDIS
= 1;
179 Dpmc0
.field
.DISPWRDN
= 1;
180 Dpmc0
.field
.DYNSREN
= 0;
181 Dpmc0
.field
.PCLSTO
= 0;
182 isbW32m(MCU
, DPMC0
, Dpmc0
.raw
);
184 // Disable out of order transactions
185 Dsch
.raw
= isbR32m(MCU
, DSCH
);
186 Dsch
.field
.OOODIS
= 1;
187 Dsch
.field
.NEWBYPDIS
= 1;
188 isbW32m(MCU
, DSCH
, Dsch
.raw
);
190 // Disable issuing the REF command
191 Drfc
.raw
= isbR32m(MCU
, DRFC
);
192 Drfc
.field
.tREFI
= 0;
193 isbW32m(MCU
, DRFC
, Drfc
.raw
);
195 // Disable ZQ calibration short
196 Dcal
.raw
= isbR32m(MCU
, DCAL
);
197 Dcal
.field
.ZQCINT
= 0;
198 Dcal
.field
.SRXZQCL
= 0;
199 isbW32m(MCU
, DCAL
, Dcal
.raw
);
201 // Training performed in address mode 0, rank population has limited impact, however
202 // simulator complains if enabled non-existing rank.
// rank_enables is a bitmask: bit0 = rank 0 present, bit1 = rank 1 present.
204 if (mrc_params
->rank_enables
& 1)
205 Drp
.field
.rank0Enabled
= 1;
206 if (mrc_params
->rank_enables
& 2)
207 Drp
.field
.rank1Enabled
= 1;
208 isbW32m(MCU
, DRP
, Drp
.raw
);
213 // After Cold Reset, BIOS should set COLDWAKE bit to 1 before
214 // sending the WAKE message to the Dunit.
215 // For Standby Exit, or any other mode in which the DRAM is in
216 // SR, this bit must be set to 0.
// perform_ddr_reset: set DRMC.COLDWAKE (BIT16), then write the default
// DRMC value (DRMC_DEFAULT macro — ODT override derived from
// mrc_params->rd_odt_value) after the wake sequence.
// NOTE(review): chunk is line-mangled; braces and the wake-command call
// implied by the comment at original line 225 are missing. Comments only.
217 static void perform_ddr_reset(
218 MRCParams_t
*mrc_params
)
222 // Set COLDWAKE bit before sending the WAKE message
223 isbM32m(MCU
, DRMC
, BIT16
, BIT16
);
225 // Send wake command to DUNIT (MUST be done before JEDEC)
229 isbW32m(MCU
, DRMC
, DRMC_DEFAULT
);
234 // Dunit Initialisation Complete.
235 // Indicates that initialisation of the Dunit has completed.
236 // Memory accesses are permitted and maintenance operation
237 // begins. Until this bit is set to a 1, the memory controller will
238 // not accept DRAM requests from the MEMORY_MANAGER or HTE.
// set_ddr_init_complete: read-modify-write MCU DCO — hand PRI ownership
// back to the MEMORY_MANAGER (PMICTL = 0) and raise the initialisation-
// complete flag (IC = 1).
// NOTE(review): chunk is line-mangled; the `Dco` declaration and braces
// are missing from the visible text. Comments only.
239 static void set_ddr_init_complete(
240 MRCParams_t
*mrc_params
)
246 Dco
.raw
= isbR32m(MCU
, DCO
);
247 Dco
.field
.PMICTL
= 0; //0 - PRI owned by MEMORY_MANAGER
248 Dco
.field
.IC
= 1; //1 - initialisation complete
249 isbW32m(MCU
, DCO
, Dco
.raw
);
// prog_page_ctrl: enable page-close idle timeout (DPMC0.PCLSTO = 0x4)
// and precharge-all power-down (DPMC0.PREAPWDEN = 1) in the MCU.
// NOTE(review): chunk is line-mangled; the `Dpmc0` declaration and braces
// are missing from the visible text. Comments only.
254 static void prog_page_ctrl(
255 MRCParams_t
*mrc_params
)
261 Dpmc0
.raw
= isbR32m(MCU
, DPMC0
);
263 Dpmc0
.field
.PCLSTO
= 0x4;
264 Dpmc0
.field
.PREAPWDEN
= 1;
266 isbW32m(MCU
, DPMC0
, Dpmc0
.raw
);
269 // Configure MCU Power Management Control Register
270 // and Scheduler Control Register.
// prog_ddr_control: post-training MCU setup — re-enable clock gating and
// out-of-order scheduling that prog_decode_before_jedec disabled, honour
// mrc_params->power_down_disable for DISPWRDN, keep page-close timeout
// and precharge-all power-down on, and tristate CMD/ADDR when idle
// (DPMC1 CMDTRIST field = 2h).
// NOTE(review): chunk is line-mangled; the Dpmc0/Dsch declarations and
// braces are missing from the visible text. Comments only.
271 static void prog_ddr_control(
272 MRCParams_t
*mrc_params
)
279 Dpmc0
.raw
= isbR32m(MCU
, DPMC0
);
280 Dsch
.raw
= isbR32m(MCU
, DSCH
);
282 Dpmc0
.field
.DISPWRDN
= mrc_params
->power_down_disable
;
283 Dpmc0
.field
.CLKGTDIS
= 0;
284 Dpmc0
.field
.PCLSTO
= 4;
285 Dpmc0
.field
.PREAPWDEN
= 1;
287 Dsch
.field
.OOODIS
= 0;
288 Dsch
.field
.OOOST3DIS
= 0;
289 Dsch
.field
.NEWBYPDIS
= 0;
291 isbW32m(MCU
, DSCH
, Dsch
.raw
);
292 isbW32m(MCU
, DPMC0
, Dpmc0
.raw
);
294 // CMDTRIST = 2h - CMD/ADDR are tristated when no valid command
295 isbM32m(MCU
, DPMC1
, 2 << 4, BIT5
|BIT4
);
300 // After training complete configure MCU Rank Population Register
301 // specifying: ranks enabled, device width, density, address mode.
// prog_dra_drb: program DRP (rank enables from mrc_params->rank_enables,
// x16 device width flags, device density mapped from DRAMParams_t
// encoding, address map from mrc_params->address_mode), then set DCO
// PMICTL = 0 / IC = 1 to mark initialisation complete.
// NOTE(review): chunk is line-mangled; the Dco/Drp declarations, braces,
// and any statements between the first DCO read and write are missing
// from the visible text. Comments only.
302 static void prog_dra_drb(
303 MRCParams_t
*mrc_params
)
310 Dco
.raw
= isbR32m(MCU
, DCO
);
// NOTE(review): a modification of Dco between this read and the write
// below is presumably on the dropped line(s) — confirm against the
// original source.
312 isbW32m(MCU
, DCO
, Dco
.raw
);
315 if (mrc_params
->rank_enables
& 1)
316 Drp
.field
.rank0Enabled
= 1;
317 if (mrc_params
->rank_enables
& 2)
318 Drp
.field
.rank1Enabled
= 1;
319 if (mrc_params
->dram_width
== x16
)
321 Drp
.field
.dimm0DevWidth
= 1;
322 Drp
.field
.dimm1DevWidth
= 1;
324 // Density encoding in DRAMParams_t 0=512Mb, 1=Gb, 2=2Gb, 3=4Gb
325 // has to be mapped RANKDENSx encoding (0=1Gb)
326 Drp
.field
.dimm0DevDensity
= mrc_params
->params
.DENSITY
- 1;
327 Drp
.field
.dimm1DevDensity
= mrc_params
->params
.DENSITY
- 1;
329 // Address mode can be overwritten if ECC enabled
330 Drp
.field
.addressMap
= mrc_params
->address_mode
;
332 isbW32m(MCU
, DRP
, Drp
.raw
);
334 Dco
.field
.PMICTL
= 0; //0 - PRI owned by MEMORY_MANAGER
335 Dco
.field
.IC
= 1; //1 - initialisation complete
336 isbW32m(MCU
, DCO
, Dco
.raw
);
341 // Configure refresh rate and short ZQ calibration interval.
342 // Activate dynamic self refresh.
// change_refresh_period: restore run-time refresh (DRFC.tREFI from
// mrc_params->refresh_rate, clearing the refresh debt counter), set the
// short ZQ calibration interval (DCAL.ZQCINT = 3, per comment 63 ms),
// and enable PHY clock gating plus dynamic self refresh in DPMC0.
// NOTE(review): chunk is line-mangled; the Drfc/Dcal/Dpmc0 declarations
// and braces are missing from the visible text. Comments only.
343 static void change_refresh_period(
344 MRCParams_t
*mrc_params
)
352 Drfc
.raw
= isbR32m(MCU
, DRFC
);
353 Drfc
.field
.tREFI
= mrc_params
->refresh_rate
;
354 Drfc
.field
.REFDBTCLR
= 1;
355 isbW32m(MCU
, DRFC
, Drfc
.raw
);
357 Dcal
.raw
= isbR32m(MCU
, DCAL
);
358 Dcal
.field
.ZQCINT
= 3; // 63ms
359 isbW32m(MCU
, DCAL
, Dcal
.raw
);
361 Dpmc0
.raw
= isbR32m(MCU
, DPMC0
);
362 Dpmc0
.field
.ENPHYCLKGATE
= 1;
363 Dpmc0
.field
.DYNSREN
= 1;
364 isbW32m(MCU
, DPMC0
, Dpmc0
.raw
);
369 // Send DRAM wake command
// NOTE(review): the body of perform_wake is missing from this chunk —
// only the signature survives. Presumably it calls dram_wake_command;
// confirm against the original source.
370 static void perform_wake(
371 MRCParams_t
*mrc_params
)
380 // prog_ddr_timing_control (aka mcu_init):
381 // POST_CODE[major] == 0x02
383 // It will initialise timing registers in the MCU (DTR0..DTR4).
// Derives JEDEC DDR3 timings (tRP/tRCD/tRAS/tWR/tWTR/tRRD/tRTP/tFAW) from
// mrc_params->params and the tCK[] period table, converts them to the
// controller's encodings, and writes DTR0..DTR4.
// NOTE(review): chunk is line-mangled; declarations of TCK/TCL/WL and the
// Dtr0..Dtr4 register unions, braces, and some dropped lines (e.g.
// between original lines 456 and 462 — presumably the `else` arm for
// non-800 speeds) are missing from the visible text. Comments only.
384 static void prog_ddr_timing_control(
385 MRCParams_t
*mrc_params
)
388 uint8_t TRP
, TRCD
, TRAS
, TWR
, TWTR
, TRRD
, TRTP
, TFAW
;
400 post_code(0x02, 0x00);
// Snapshot current DTR0..DTR4 so unwritten fields are preserved.
402 Dtr0
.raw
= isbR32m(MCU
, DTR0
);
403 Dtr1
.raw
= isbR32m(MCU
, DTR1
);
404 Dtr2
.raw
= isbR32m(MCU
, DTR2
);
405 Dtr3
.raw
= isbR32m(MCU
, DTR3
);
406 Dtr4
.raw
= isbR32m(MCU
, DTR4
);
// Convert picosecond parameters to DRAM clocks (MCEIL = ceiling divide).
408 TCK
= tCK
[mrc_params
->ddr_speed
]; // Clock in picoseconds
409 TCL
= mrc_params
->params
.tCL
; // CAS latency in clocks
410 TRP
= TCL
; // Per CAT MRC
411 TRCD
= TCL
; // Per CAT MRC
412 TRAS
= MCEIL(mrc_params
->params
.tRAS
, TCK
);
413 TWR
= MCEIL(15000, TCK
); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600
415 TWTR
= MCEIL(mrc_params
->params
.tWTR
, TCK
);
416 TRRD
= MCEIL(mrc_params
->params
.tRRD
, TCK
);
417 TRTP
= 4; // Valid for 800 and 1066, use 5 for 1333
418 TFAW
= MCEIL(mrc_params
->params
.tFAW
, TCK
);
// Write latency: 5 clocks at speed index 0 (800), +1 per speed step.
420 WL
= 5 + mrc_params
->ddr_speed
;
422 Dtr0
.field
.dramFrequency
= mrc_params
->ddr_speed
;
424 Dtr0
.field
.tCL
= TCL
- 5; //Convert from TCL (DRAM clocks) to VLV indx
425 Dtr0
.field
.tRP
= TRP
- 5; //5 bit DRAM Clock
426 Dtr0
.field
.tRCD
= TRCD
- 5; //5 bit DRAM Clock
428 Dtr1
.field
.tWCL
= WL
- 3; //Convert from WL (DRAM clocks) to VLV indx
429 Dtr1
.field
.tWTP
= WL
+ 4 + TWR
- 14; //Change to tWTP
430 Dtr1
.field
.tRTP
= MMAX(TRTP
, 4) - 3; //4 bit DRAM Clock
431 Dtr1
.field
.tRRD
= TRRD
- 4; //4 bit DRAM Clock
432 Dtr1
.field
.tCMD
= 1; //2N
433 Dtr1
.field
.tRAS
= TRAS
- 14; //6 bit DRAM Clock
435 Dtr1
.field
.tFAW
= ((TFAW
+ 1) >> 1) - 5; //4 bit DRAM Clock
436 Dtr1
.field
.tCCD
= 0; //Set 4 Clock CAS to CAS delay (multi-burst)
// Rank-to-rank / read-write turnaround delays.
437 Dtr2
.field
.tRRDR
= 1;
438 Dtr2
.field
.tWWDR
= 2;
439 Dtr2
.field
.tRWDR
= 2;
440 Dtr3
.field
.tWRDR
= 2;
441 Dtr3
.field
.tWRDD
= 2;
443 if (mrc_params
->ddr_speed
== DDRFREQ_800
)
445 // Extended RW delay (+1)
446 Dtr3
.field
.tRWSR
= TCL
- 5 + 1;
448 else if(mrc_params
->ddr_speed
== DDRFREQ_1066
)
450 // Extended RW delay (+1)
451 Dtr3
.field
.tRWSR
= TCL
- 5 + 1;
454 Dtr3
.field
.tWRSR
= 4 + WL
+ TWTR
- 11;
456 if (mrc_params
->ddr_speed
== DDRFREQ_800
)
458 Dtr3
.field
.tXP
= MMAX(0, 1 - Dtr1
.field
.tCMD
);
// NOTE(review): dropped lines here are presumably the `else` keyword/brace
// pairing this assignment with the 800 MHz case above — confirm.
462 Dtr3
.field
.tXP
= MMAX(0, 2 - Dtr1
.field
.tCMD
);
// ODT start/stop windows derived from command, CAS and write-CAS fields.
465 Dtr4
.field
.WRODTSTRT
= Dtr1
.field
.tCMD
;
466 Dtr4
.field
.WRODTSTOP
= Dtr1
.field
.tCMD
;
467 Dtr4
.field
.RDODTSTRT
= Dtr1
.field
.tCMD
+ Dtr0
.field
.tCL
- Dtr1
.field
.tWCL
+ 2; //Convert from WL (DRAM clocks) to VLV indx
468 Dtr4
.field
.RDODTSTOP
= Dtr1
.field
.tCMD
+ Dtr0
.field
.tCL
- Dtr1
.field
.tWCL
+ 2;
469 Dtr4
.field
.TRGSTRDIS
= 0;
470 Dtr4
.field
.ODTDIS
= 0;
// Commit all five timing registers.
472 isbW32m(MCU
, DTR0
, Dtr0
.raw
);
473 isbW32m(MCU
, DTR1
, Dtr1
.raw
);
474 isbW32m(MCU
, DTR2
, Dtr2
.raw
);
475 isbW32m(MCU
, DTR3
, Dtr3
.raw
);
476 isbW32m(MCU
, DTR4
, Dtr4
.raw
);
482 // POST_CODE[major] == 0x03
484 // This function performs some initialisation on the DDRIO unit.
485 // This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
486 static void ddrphy_init(MRCParams_t
*mrc_params
)
488 uint32_t tempD
; // temporary DWORD
489 uint8_t channel_i
; // channel counter
490 uint8_t rank_i
; // rank counter
491 uint8_t bl_grp_i
; // byte lane group counter (2 BLs per module)
493 uint8_t bl_divisor
= /*(mrc_params->channel_width==x16)?2:*/1; // byte lane divisor
494 uint8_t speed
= mrc_params
->ddr_speed
& (BIT1
|BIT0
); // For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333
500 tCAS
= mrc_params
->params
.tCL
;
501 tCWL
= 5 + mrc_params
->ddr_speed
;
503 // ddrphy_init starts
504 post_code(0x03, 0x00);
507 // Make sure IOBUFACT is deasserted before initialising the DDR PHY.
509 // Make sure WRPTRENABLE is deasserted before initialising the DDR PHY.
510 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
511 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
512 // Deassert DDRPHY Initialisation Complete
513 isbM32m(DDRPHY
, (CMDPMCONFIG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ~BIT20
, BIT20
); // SPID_INIT_COMPLETE=0
515 isbM32m(DDRPHY
, (CMDCFGREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ~BIT2
, BIT2
); // IOBUFACTRST_N=0
517 isbM32m(DDRPHY
, (CMDPTRREG
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ~BIT0
, BIT0
); // WRPTRENABLE=0
518 } // if channel enabled
522 isbM32m(DDRPHY
, MASTERRSTN
, 0, BIT0
); // PHYRSTN=0
524 // Initialise DQ01,DQ23,CMD,CLK-CTL,COMP modules
526 post_code(0x03, 0x10);
527 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
528 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
531 for (bl_grp_i
=0; bl_grp_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_grp_i
++) {
532 isbM32m(DDRPHY
, (DQOBSCKEBBCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((bl_grp_i
) ? (0x00) : (BIT22
)), (BIT22
)); // Analog MUX select - IO2xCLKSEL
535 switch (mrc_params
->rd_odt_value
) {
536 case 1: tempD
= 0x3; break; // 60 ohm
537 case 2: tempD
= 0x3; break; // 120 ohm
538 case 3: tempD
= 0x3; break; // 180 ohm
539 default: tempD
= 0x3; break; // 120 ohm
541 isbM32m(DDRPHY
, (B0RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (tempD
<<5), (BIT6
|BIT5
)); // ODT strength
542 isbM32m(DDRPHY
, (B1RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (tempD
<<5), (BIT6
|BIT5
)); // ODT strength
543 // Dynamic ODT/DIFFAMP
544 tempD
= (((tCAS
)<<24)|((tCAS
)<<16)|((tCAS
)<<8)|((tCAS
)<<0));
546 case 0: tempD
-= 0x01010101; break; // 800
547 case 1: tempD
-= 0x02020202; break; // 1066
548 case 2: tempD
-= 0x03030303; break; // 1333
549 case 3: tempD
-= 0x04040404; break; // 1600
551 isbM32m(DDRPHY
, (B01LATCTL1
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), tempD
, ((BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // Launch Time: ODT, DIFFAMP, ODT, DIFFAMP
554 case 0: tempD
= ((0x06<<16)|(0x07<<8)); break; // 800
555 case 1: tempD
= ((0x07<<16)|(0x08<<8)); break; // 1066
556 case 2: tempD
= ((0x09<<16)|(0x0A<<8)); break; // 1333
557 case 3: tempD
= ((0x0A<<16)|(0x0B<<8)); break; // 1600
559 isbM32m(DDRPHY
, (B0ONDURCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), tempD
, ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
))); // On Duration: ODT, DIFFAMP
560 isbM32m(DDRPHY
, (B1ONDURCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), tempD
, ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
))); // On Duration: ODT, DIFFAMP
562 switch (mrc_params
->rd_odt_value
) {
563 case 0: tempD
= ((0x3F<<16)|(0x3f<<10)); break; // override DIFFAMP=on, ODT=off
564 default: tempD
= ((0x3F<<16)|(0x2A<<10)); break; // override DIFFAMP=on, ODT=on
566 isbM32m(DDRPHY
, (B0OVRCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), tempD
, ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
))); // Override: DIFFAMP, ODT
567 isbM32m(DDRPHY
, (B1OVRCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), tempD
, ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
))); // Override: DIFFAMP, ODT
570 // 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO)
571 isbM32m(DDRPHY
, (B0LATCTL0
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (((tCAS
+7)<<16)|((tCAS
-4)<<8)|((tCWL
-2)<<0)), ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // 1xCLK: tEDP, RCVEN, WDQS
572 isbM32m(DDRPHY
, (B1LATCTL0
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (((tCAS
+7)<<16)|((tCAS
-4)<<8)|((tCWL
-2)<<0)), ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // 1xCLK: tEDP, RCVEN, WDQS
575 isbM32m(DDRPHY
, (B0RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((0x0<<7)|(0x0<<0)), (BIT7
|BIT0
)); // AFE Bypass, RCVEN DIFFAMP
576 isbM32m(DDRPHY
, (B1RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((0x0<<7)|(0x0<<0)), (BIT7
|BIT0
)); // AFE Bypass, RCVEN DIFFAMP
578 isbM32m(DDRPHY
, (DQCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT16
), (BIT16
)); // 0 means driving DQ during DQS-preamble
579 isbM32m(DDRPHY
, (B01PTRCTL1
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT8
), (BIT8
)); // WR_LVL mode disable
581 isbM32m(DDRPHY
, (B0VREFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
)|BIT1
|BIT0
)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)
582 isbM32m(DDRPHY
, (B1VREFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
)|BIT1
|BIT0
)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)
583 isbM32m(DDRPHY
, (B0RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (0), (BIT4
)); // Per-Bit De-Skew Enable
584 isbM32m(DDRPHY
, (B1RXIOBUFCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (0), (BIT4
)); // Per-Bit De-Skew Enable
587 isbM32m(DDRPHY
, (CMDOBSCKEBBCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), 0, (BIT23
));
589 // Enable tristate control of cmd/address bus
590 isbM32m(DDRPHY
, (CMDCFGREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), 0, (BIT1
|BIT0
));
593 isbM32m(DDRPHY
, (CMDRCOMPODT
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x03<<5)|(0x03<<0)), ((BIT9
|BIT8
|BIT7
|BIT6
|BIT5
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
)));
595 // CMDPM* registers must be programmed in this order...
596 isbM32m(DDRPHY
, (CMDPMDLYREG4
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xFFFFU
<<16)|(0xFFFF<<0)), ((BIT31
|BIT30
|BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
|BIT23
|BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
|BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // Turn On Delays: SFR (regulator), MPLL
597 isbM32m(DDRPHY
, (CMDPMDLYREG3
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xFU
<<28)|(0xFFF<<16)|(0xF<<12)|(0x616<<0)), ((BIT31
|BIT30
|BIT29
|BIT28
)|(BIT27
|BIT26
|BIT25
|BIT24
|BIT23
|BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
|BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3, VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT_for_PM_MSG_gt0, MDLL Turn On
598 isbM32m(DDRPHY
, (CMDPMDLYREG2
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xFFU
<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31
|BIT30
|BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT23
|BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // MPLL Divider Reset Delays
599 isbM32m(DDRPHY
, (CMDPMDLYREG1
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xFFU
<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31
|BIT30
|BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT23
|BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // Turn Off Delays: VREG, Staggered MDLL, MDLL, PI
600 isbM32m(DDRPHY
, (CMDPMDLYREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xFFU
<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31
|BIT30
|BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT23
|BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT
601 isbM32m(DDRPHY
, (CMDPMCONFIG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x6<<8)|BIT6
|(0x4<<0)), (BIT31
|BIT30
|BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
|BIT23
|BIT22
|BIT21
|(BIT11
|BIT10
|BIT9
|BIT8
)|BIT6
|(BIT3
|BIT2
|BIT1
|BIT0
))); // Allow PUnit signals
602 isbM32m(DDRPHY
, (CMDMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x3<<4)|(0x7<<0)), ((BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG
604 isbM32m(DDRPHY
, (CCOBSCKEBBCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), 0, (BIT24
)); // CLKEBB
605 isbM32m(DDRPHY
, (CCCFGREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x0<<16)|(0x0<<12)|(0x0<<8)|(0xF<<4)|BIT0
), ((BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
)|BIT0
)); // Buffer Enable: CS,CKE,ODT,CLK
606 isbM32m(DDRPHY
, (CCRCOMPODT
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x03<<8)|(0x03<<0)), ((BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // ODT RCOMP
607 isbM32m(DDRPHY
, (CCMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x3<<4)|(0x7<<0)), ((BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG
609 // COMP (RON channel specific)
610 // - DQ/DQS/DM RON: 32 Ohm
611 // - CTRL/CMD RON: 27 Ohm
613 isbM32m(DDRPHY
, (DQVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x08<<24)|(0x03<<16)), ((BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP Vref PU/PD
614 isbM32m(DDRPHY
, (CMDVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x0C<<24)|(0x03<<16)), ((BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP Vref PU/PD
615 isbM32m(DDRPHY
, (CLKVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x0F<<24)|(0x03<<16)), ((BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP Vref PU/PD
616 isbM32m(DDRPHY
, (DQSVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x08<<24)|(0x03<<16)), ((BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP Vref PU/PD
617 isbM32m(DDRPHY
, (CTLVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x0C<<24)|(0x03<<16)), ((BIT29
|BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP Vref PU/PD
619 // DQS Swapped Input Enable
620 isbM32m(DDRPHY
, (COMPEN1CH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT19
|BIT17
), ((BIT31
|BIT30
)|BIT19
|BIT17
|(BIT15
|BIT14
)));
622 // ODT VREF = 1.5 x 274/360+274 = 0.65V (code of ~50)
623 isbM32m(DDRPHY
, (DQVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x32<<8)|(0x03<<0)), ((BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // ODT Vref PU/PD
624 isbM32m(DDRPHY
, (DQSVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x32<<8)|(0x03<<0)), ((BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // ODT Vref PU/PD
625 isbM32m(DDRPHY
, (CLKVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x0E<<8)|(0x05<<0)), ((BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // ODT Vref PU/PD
627 // Slew rate settings are frequency specific, numbers below are for 800Mhz (speed == 0)
628 // - DQ/DQS/DM/CLK SR: 4V/ns,
629 // - CTRL/CMD SR: 1.5V/ns
630 tempD
= (0x0E<<16)|(0x0E<<12)|(0x08<<8)|(0x0B<<4)|(0x0B<<0);
631 isbM32m(DDRPHY
, (DLYSELCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (tempD
), ((BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ
632 isbM32m(DDRPHY
, (TCOVREFCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x05<<16)|(0x05<<8)|(0x05<<0)), ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // TCO Vref CLK,DQS,DQ
633 isbM32m(DDRPHY
, (CCBUFODTCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ((0x03<<8)|(0x03<<0)), ((BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT4
|BIT3
|BIT2
|BIT1
|BIT0
))); // ODTCOMP CMD/CTL PU/PD
634 isbM32m(DDRPHY
, (COMPEN0CH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (0), ((BIT31
|BIT30
)|BIT8
)); // COMP
638 isbM32m(DDRPHY
, (DQDRVPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PU
639 isbM32m(DDRPHY
, (DQDRVPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PD
640 isbM32m(DDRPHY
, (DQDLYPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x10<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PU
641 isbM32m(DDRPHY
, (DQDLYPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x10<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PD
642 isbM32m(DDRPHY
, (DQODTPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PU
643 isbM32m(DDRPHY
, (DQODTPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PD
644 isbM32m(DDRPHY
, (DQTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PU
645 isbM32m(DDRPHY
, (DQTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PD
646 // DQS COMP Overrides
647 isbM32m(DDRPHY
, (DQSDRVPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PU
648 isbM32m(DDRPHY
, (DQSDRVPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PD
649 isbM32m(DDRPHY
, (DQSDLYPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x10<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PU
650 isbM32m(DDRPHY
, (DQSDLYPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x10<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PD
651 isbM32m(DDRPHY
, (DQSODTPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PU
652 isbM32m(DDRPHY
, (DQSODTPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PD
653 isbM32m(DDRPHY
, (DQSTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PU
654 isbM32m(DDRPHY
, (DQSTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PD
655 // CLK COMP Overrides
656 isbM32m(DDRPHY
, (CLKDRVPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0C<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PU
657 isbM32m(DDRPHY
, (CLKDRVPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0C<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PD
658 isbM32m(DDRPHY
, (CLKDLYPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x07<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PU
659 isbM32m(DDRPHY
, (CLKDLYPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x07<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PD
660 isbM32m(DDRPHY
, (CLKODTPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PU
661 isbM32m(DDRPHY
, (CLKODTPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0B<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODTCOMP PD
662 isbM32m(DDRPHY
, (CLKTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PU
663 isbM32m(DDRPHY
, (CLKTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
), (BIT31
)); // TCOCOMP PD
664 // CMD COMP Overrides
665 isbM32m(DDRPHY
, (CMDDRVPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0D<<16)), (BIT31
|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PU
666 isbM32m(DDRPHY
, (CMDDRVPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0D<<16)), (BIT31
|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PD
667 isbM32m(DDRPHY
, (CMDDLYPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PU
668 isbM32m(DDRPHY
, (CMDDLYPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PD
669 // CTL COMP Overrides
670 isbM32m(DDRPHY
, (CTLDRVPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0D<<16)), (BIT31
|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PU
671 isbM32m(DDRPHY
, (CTLDRVPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0D<<16)), (BIT31
|(BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // RCOMP PD
672 isbM32m(DDRPHY
, (CTLDLYPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PU
673 isbM32m(DDRPHY
, (CTLDLYPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x0A<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // DCOMP PD
675 // DQ TCOCOMP Overrides
676 isbM32m(DDRPHY
, (DQTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PU
677 isbM32m(DDRPHY
, (DQTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PD
678 // DQS TCOCOMP Overrides
679 isbM32m(DDRPHY
, (DQSTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PU
680 isbM32m(DDRPHY
, (DQSTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PD
681 // CLK TCOCOMP Overrides
682 isbM32m(DDRPHY
, (CLKTCOPUCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PU
683 isbM32m(DDRPHY
, (CLKTCOPDCTLCH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), (BIT31
|(0x1F<<16)), (BIT31
|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // TCOCOMP PD
684 #endif // BACKUP_COMPS
685 // program STATIC delays
687 set_wcmd(channel_i
, ddr_wcmd
[PLATFORM_ID
]);
689 set_wcmd(channel_i
, ddr_wclk
[PLATFORM_ID
] + HALF_CLK
);
690 #endif // BACKUP_WCMD
691 for (rank_i
=0; rank_i
<NUM_RANKS
; rank_i
++) {
692 if (mrc_params
->rank_enables
& (1<<rank_i
)) {
693 set_wclk(channel_i
, rank_i
, ddr_wclk
[PLATFORM_ID
]);
695 set_wctl(channel_i
, rank_i
, ddr_wctl
[PLATFORM_ID
]);
697 set_wctl(channel_i
, rank_i
, ddr_wclk
[PLATFORM_ID
] + HALF_CLK
);
698 #endif // BACKUP_WCTL
703 // COMP (non channel specific)
704 //isbM32m(DDRPHY, (), (), ());
705 isbM32m(DDRPHY
, (DQANADRVPUCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PU Enable
706 isbM32m(DDRPHY
, (DQANADRVPDCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PD Enable
707 isbM32m(DDRPHY
, (CMDANADRVPUCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PU Enable
708 isbM32m(DDRPHY
, (CMDANADRVPDCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PD Enable
709 isbM32m(DDRPHY
, (CLKANADRVPUCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PU Enable
710 isbM32m(DDRPHY
, (CLKANADRVPDCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PD Enable
711 isbM32m(DDRPHY
, (DQSANADRVPUCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PU Enable
712 isbM32m(DDRPHY
, (DQSANADRVPDCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PD Enable
713 isbM32m(DDRPHY
, (CTLANADRVPUCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PU Enable
714 isbM32m(DDRPHY
, (CTLANADRVPDCTL
), (BIT30
), (BIT30
)); // RCOMP: Dither PD Enable
715 isbM32m(DDRPHY
, (DQANAODTPUCTL
), (BIT30
), (BIT30
)); // ODT: Dither PU Enable
716 isbM32m(DDRPHY
, (DQANAODTPDCTL
), (BIT30
), (BIT30
)); // ODT: Dither PD Enable
717 isbM32m(DDRPHY
, (CLKANAODTPUCTL
), (BIT30
), (BIT30
)); // ODT: Dither PU Enable
718 isbM32m(DDRPHY
, (CLKANAODTPDCTL
), (BIT30
), (BIT30
)); // ODT: Dither PD Enable
719 isbM32m(DDRPHY
, (DQSANAODTPUCTL
), (BIT30
), (BIT30
)); // ODT: Dither PU Enable
720 isbM32m(DDRPHY
, (DQSANAODTPDCTL
), (BIT30
), (BIT30
)); // ODT: Dither PD Enable
721 isbM32m(DDRPHY
, (DQANADLYPUCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PU Enable
722 isbM32m(DDRPHY
, (DQANADLYPDCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PD Enable
723 isbM32m(DDRPHY
, (CMDANADLYPUCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PU Enable
724 isbM32m(DDRPHY
, (CMDANADLYPDCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PD Enable
725 isbM32m(DDRPHY
, (CLKANADLYPUCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PU Enable
726 isbM32m(DDRPHY
, (CLKANADLYPDCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PD Enable
727 isbM32m(DDRPHY
, (DQSANADLYPUCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PU Enable
728 isbM32m(DDRPHY
, (DQSANADLYPDCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PD Enable
729 isbM32m(DDRPHY
, (CTLANADLYPUCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PU Enable
730 isbM32m(DDRPHY
, (CTLANADLYPDCTL
), (BIT30
), (BIT30
)); // DCOMP: Dither PD Enable
731 isbM32m(DDRPHY
, (DQANATCOPUCTL
), (BIT30
), (BIT30
)); // TCO: Dither PU Enable
732 isbM32m(DDRPHY
, (DQANATCOPDCTL
), (BIT30
), (BIT30
)); // TCO: Dither PD Enable
733 isbM32m(DDRPHY
, (CLKANATCOPUCTL
), (BIT30
), (BIT30
)); // TCO: Dither PU Enable
734 isbM32m(DDRPHY
, (CLKANATCOPDCTL
), (BIT30
), (BIT30
)); // TCO: Dither PD Enable
735 isbM32m(DDRPHY
, (DQSANATCOPUCTL
), (BIT30
), (BIT30
)); // TCO: Dither PU Enable
736 isbM32m(DDRPHY
, (DQSANATCOPDCTL
), (BIT30
), (BIT30
)); // TCO: Dither PD Enable
737 isbM32m(DDRPHY
, (TCOCNTCTRL
), (0x1<<0), (BIT1
|BIT0
)); // TCOCOMP: Pulse Count
738 isbM32m(DDRPHY
, (CHNLBUFSTATIC
), ((0x03<<24)|(0x03<<16)), ((BIT28
|BIT27
|BIT26
|BIT25
|BIT24
)|(BIT20
|BIT19
|BIT18
|BIT17
|BIT16
))); // ODT: CMD/CTL PD/PU
739 isbM32m(DDRPHY
, (MSCNTR
), (0x64<<0), (BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
|BIT1
|BIT0
)); // Set 1us counter
740 isbM32m(DDRPHY
, (LATCH1CTL
), (0x1<<28), (BIT30
|BIT29
|BIT28
)); // ???
742 // Release PHY from reset
743 isbM32m(DDRPHY
, MASTERRSTN
, BIT0
, BIT0
); // PHYRSTN=1
746 post_code(0x03, 0x11);
747 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
748 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
750 for (bl_grp_i
=0; bl_grp_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_grp_i
++) {
751 isbM32m(DDRPHY
, (DQMDLLCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT13
), (BIT13
)); // Enable VREG
755 isbM32m(DDRPHY
, (ECCMDLLCTL
), (BIT13
), (BIT13
)); // Enable VREG
758 isbM32m(DDRPHY
, (CMDMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), (BIT13
), (BIT13
)); // Enable VREG
761 isbM32m(DDRPHY
, (CCMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), (BIT13
), (BIT13
)); // Enable VREG
767 post_code(0x03, 0x12);
769 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
770 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
772 for (bl_grp_i
=0; bl_grp_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_grp_i
++) {
773 isbM32m(DDRPHY
, (DQMDLLCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT17
), (BIT17
)); // Enable MCDLL
777 isbM32m(DDRPHY
, (ECCMDLLCTL
), (BIT17
), (BIT17
)); // Enable MCDLL
780 isbM32m(DDRPHY
, (CMDMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), (BIT18
), (BIT18
)); // Enable MCDLL
783 isbM32m(DDRPHY
, (CCMDLLCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), (BIT18
), (BIT18
)); // Enable MCDLL
789 post_code(0x03, 0x13);
791 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
792 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
794 for (bl_grp_i
=0; bl_grp_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_grp_i
++) {
795 #ifdef FORCE_16BIT_DDRIO
796 tempD
= ((bl_grp_i
) && (mrc_params
->channel_width
== x16
)) ? ((0x1<<12)|(0x1<<8)|(0xF<<4)|(0xF<<0)) : ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));
798 tempD
= ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));
800 isbM32m(DDRPHY
, (DQDLLTXCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (tempD
), ((BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // Enable TXDLL
802 isbM32m(DDRPHY
, (DQDLLRXCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT3
|BIT2
|BIT1
|BIT0
), (BIT3
|BIT2
|BIT1
|BIT0
)); // Enable RXDLL
804 isbM32m(DDRPHY
, (B0OVRCTL
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), (BIT3
|BIT2
|BIT1
|BIT0
), (BIT3
|BIT2
|BIT1
|BIT0
)); // Enable RXDLL Overrides BL0
808 tempD
= ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));
809 isbM32m(DDRPHY
, (ECCDLLTXCTL
), (tempD
), ((BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // Enable TXDLL
813 isbM32m(DDRPHY
, (CMDDLLTXCTL
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0)), ((BIT15
|BIT14
|BIT13
|BIT12
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
)|(BIT3
|BIT2
|BIT1
|BIT0
))); // Enable TXDLL
820 post_code(0x03, 0x14);
821 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++) {
822 if (mrc_params
->channel_enables
& (1<<channel_i
)) {
823 // Host To Memory Clock Alignment (HMC) for 800/1066
824 for (bl_grp_i
=0; bl_grp_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_grp_i
++) {
825 isbM32m(DDRPHY
, (DQCLKALIGNREG2
+ (bl_grp_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), ((bl_grp_i
)?(0x3):(0x1)), (BIT3
|BIT2
|BIT1
|BIT0
)); // CLK_ALIGN_MOD_ID
827 isbM32m(DDRPHY
, (ECCCLKALIGNREG2
+ (channel_i
* DDRIODQ_CH_OFFSET
)), 0x2, (BIT3
|BIT2
|BIT1
|BIT0
)); // CLK_ALIGN_MOD_ID
828 isbM32m(DDRPHY
, (CMDCLKALIGNREG2
+ (channel_i
* DDRIODQ_CH_OFFSET
)), 0x0, (BIT3
|BIT2
|BIT1
|BIT0
)); // CLK_ALIGN_MOD_ID
829 isbM32m(DDRPHY
, (CCCLKALIGNREG2
+ (channel_i
* DDRIODQ_CH_OFFSET
)), 0x2, (BIT3
|BIT2
|BIT1
|BIT0
)); // CLK_ALIGN_MOD_ID
830 isbM32m(DDRPHY
, (CMDCLKALIGNREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), (0x2<<4), (BIT5
|BIT4
)); // CLK_ALIGN_MODE
831 isbM32m(DDRPHY
, (CMDCLKALIGNREG1
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x18<<16)|(0x10<<8)|(0x8<<2)|(0x1<<0)), ((BIT22
|BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT14
|BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
|BIT3
|BIT2
)|(BIT1
|BIT0
))); // NUM_SAMPLES, MAX_SAMPLES, MACRO_PI_STEP, MICRO_PI_STEP
832 isbM32m(DDRPHY
, (CMDCLKALIGNREG2
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), ((0x10<<16)|(0x4<<8)|(0x2<<4)), ((BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT11
|BIT10
|BIT9
|BIT8
)|(BIT7
|BIT6
|BIT5
|BIT4
))); // ???, TOTAL_NUM_MODULES, FIRST_U_PARTITION
834 isbM32m(DDRPHY
, (CMDCLKALIGNREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), BIT24
, BIT24
); // START_CLK_ALIGN=1
835 while (isbR32m(DDRPHY
, (CMDCLKALIGNREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
))) & BIT24
); // wait for START_CLK_ALIGN=0
838 // Set RD/WR Pointer Seperation & COUNTEN & FIFOPTREN
839 isbM32m(DDRPHY
, (CMDPTRREG
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), BIT0
, BIT0
); // WRPTRENABLE=1
843 // comp is not working on simulator
846 isbM32m(DDRPHY
, (COMPEN0CH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), BIT5
, BIT5
); // enable bypass for CLK buffer (PO)
847 isbM32m(DDRPHY
, (CMPCTRL
), (BIT0
), (BIT0
)); // Initial COMP Enable
848 while (isbR32m(DDRPHY
, (CMPCTRL
)) & BIT0
); // wait for Initial COMP Enable = 0
849 isbM32m(DDRPHY
, (COMPEN0CH0
+ (channel_i
* DDRCOMP_CH_OFFSET
)), ~BIT5
, BIT5
); // disable bypass for CLK buffer (PO)
854 isbM32m(DDRPHY
, (CMDCFGREG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), BIT2
, BIT2
); // IOBUFACTRST_N=1
856 // DDRPHY initialisation complete
857 isbM32m(DDRPHY
, (CMDPMCONFIG0
+ (channel_i
* DDRIOCCC_CH_OFFSET
)), BIT20
, BIT20
); // SPID_INIT_COMPLETE=1
865 // jedec_init (aka PerformJedecInit):
866 // This function performs JEDEC initialisation on all enabled channels.
867 static void jedec_init(
868 MRCParams_t
*mrc_params
,
871 uint8_t TWR
, WL
, Rank
;
876 DramInitDDR3MRS0 mrs0Command
;
877 DramInitDDR3EMR1 emrs1Command
;
878 DramInitDDR3EMR2 emrs2Command
;
879 DramInitDDR3EMR3 emrs3Command
;
886 post_code(0x04, 0x00);
889 // Assert RESET# for 200us
890 isbM32m(DDRPHY
, CCDDR3RESETCTL
, BIT1
, (BIT8
|BIT1
)); // DDR3_RESET_SET=0, DDR3_RESET_RESET=1
892 // Don't waste time during simulation
897 isbM32m(DDRPHY
, CCDDR3RESETCTL
, BIT8
, (BIT8
|BIT1
)); // DDR3_RESET_SET=1, DDR3_RESET_RESET=0
899 DTR0reg
.raw
= isbR32m(MCU
, DTR0
);
901 // Set CKEVAL for populated ranks
902 // then send NOP to each rank (#4550197)
907 DRPbuffer
= isbR32m(MCU
, DRP
);
909 DRMCbuffer
= isbR32m(MCU
, DRMC
);
910 DRMCbuffer
&= 0xFFFFFFFC;
911 DRMCbuffer
|= (BIT4
| DRPbuffer
);
913 isbW32m(MCU
, DRMC
, DRMCbuffer
);
915 for (Rank
= 0; Rank
< NUM_RANKS
; Rank
++)
917 // Skip to next populated rank
918 if ((mrc_params
->rank_enables
& (1 << Rank
)) == 0)
923 dram_init_command(DCMD_NOP(Rank
));
926 isbW32m(MCU
, DRMC
, DRMC_DEFAULT
);
930 // BIT[15:11] --> Always "0"
931 // BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
932 // BIT[08] --> Always "0"
933 // BIT[07] --> SRT: use sr_temp_range
934 // BIT[06] --> ASR: want "Manual SR Reference" (0)
935 // BIT[05:03] --> CWL: use oem_tCWL
936 // BIT[02:00] --> PASR: want "Full Array" (0)
937 emrs2Command
.raw
= 0;
938 emrs2Command
.field
.bankAddress
= 2;
940 WL
= 5 + mrc_params
->ddr_speed
;
941 emrs2Command
.field
.CWL
= WL
- 5;
942 emrs2Command
.field
.SRT
= mrc_params
->sr_temp_range
;
945 // BIT[15:03] --> Always "0"
946 // BIT[02] --> MPR: want "Normal Operation" (0)
947 // BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
948 emrs3Command
.raw
= 0;
949 emrs3Command
.field
.bankAddress
= 3;
952 // BIT[15:13] --> Always "0"
953 // BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
954 // BIT[11:11] --> TDQS: want "Disabled" (0)
955 // BIT[10:10] --> Always "0"
956 // BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
957 // BIT[08] --> Always "0"
958 // BIT[07] --> WR_LVL: want "Disabled" (0)
959 // BIT[05,01] --> DIC: use ron_value
960 // BIT[04:03] --> AL: additive latency want "0" (0)
961 // BIT[00] --> DLL: want "Enable" (0)
963 // (BIT5|BIT1) set Ron value
964 // 00 --> RZQ/6 (40ohm)
965 // 01 --> RZQ/7 (34ohm)
968 // (BIT9|BIT6|BIT2) set Rtt_nom value
970 // 001 --> RZQ/4 ( 60ohm)
971 // 010 --> RZQ/2 (120ohm)
972 // 011 --> RZQ/6 ( 40ohm)
974 emrs1Command
.raw
= 0;
975 emrs1Command
.field
.bankAddress
= 1;
976 emrs1Command
.field
.dllEnabled
= 0; // 0 = Enable , 1 = Disable
978 if (mrc_params
->ron_value
== 0)
980 emrs1Command
.field
.DIC0
= DDR3_EMRS1_DIC_34
;
984 emrs1Command
.field
.DIC0
= DDR3_EMRS1_DIC_40
;
988 if (mrc_params
->rtt_nom_value
== 0)
990 emrs1Command
.raw
|= (DDR3_EMRS1_RTTNOM_40
<< 6);
992 else if (mrc_params
->rtt_nom_value
== 1)
994 emrs1Command
.raw
|= (DDR3_EMRS1_RTTNOM_60
<< 6);
996 else if (mrc_params
->rtt_nom_value
== 2)
998 emrs1Command
.raw
|= (DDR3_EMRS1_RTTNOM_120
<< 6);
1001 // save MRS1 value (excluding control fields)
1002 mrc_params
->mrs1
= emrs1Command
.raw
>> 6;
1005 // BIT[15:13] --> Always "0"
1006 // BIT[12] --> PPD: for Quark (1)
1007 // BIT[11:09] --> WR: use oem_tWR
1008 // BIT[08] --> DLL: want "Reset" (1, self clearing)
1009 // BIT[07] --> MODE: want "Normal" (0)
1010 // BIT[06:04,02] --> CL: use oem_tCAS
1011 // BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
1012 // BIT[01:00] --> BL: want "8 Fixed" (0)
1023 // BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
1024 // BIT[06:04] use oem_tCAS-4
1025 mrs0Command
.raw
= 0;
1026 mrs0Command
.field
.bankAddress
= 0;
1027 mrs0Command
.field
.dllReset
= 1;
1028 mrs0Command
.field
.BL
= 0;
1029 mrs0Command
.field
.PPD
= 1;
1030 mrs0Command
.field
.casLatency
= DTR0reg
.field
.tCL
+ 1;
1032 TCK
= tCK
[mrc_params
->ddr_speed
];
1033 TWR
= MCEIL(15000, TCK
); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600
1034 mrs0Command
.field
.writeRecovery
= TWR
- 4;
1036 for (Rank
= 0; Rank
< NUM_RANKS
; Rank
++)
1038 // Skip to next populated rank
1039 if ((mrc_params
->rank_enables
& (1 << Rank
)) == 0)
1044 emrs2Command
.field
.rankSelect
= Rank
;
1045 dram_init_command(emrs2Command
.raw
);
1047 emrs3Command
.field
.rankSelect
= Rank
;
1048 dram_init_command(emrs3Command
.raw
);
1050 emrs1Command
.field
.rankSelect
= Rank
;
1051 dram_init_command(emrs1Command
.raw
);
1053 mrs0Command
.field
.rankSelect
= Rank
;
1054 dram_init_command(mrs0Command
.raw
);
1056 dram_init_command(DCMD_ZQCL(Rank
));
1064 // POST_CODE[major] == 0x05
1066 // This function will perform our RCVEN Calibration Algorithm.
1067 // We will only use the 2xCLK domain timings to perform RCVEN Calibration.
1068 // All byte lanes will be calibrated "simultaneously" per channel per rank.
1069 static void rcvn_cal(
1070 MRCParams_t
*mrc_params
)
1072 uint8_t channel_i
; // channel counter
1073 uint8_t rank_i
; // rank counter
1074 uint8_t bl_i
; // byte lane counter
1075 uint8_t bl_divisor
= (mrc_params
->channel_width
== x16
) ? 2 : 1; // byte lane divisor
1078 uint32_t final_delay
[NUM_CHANNELS
][NUM_BYTE_LANES
]; // used to find placement for rank2rank sharing configs
1080 uint32_t num_ranks_enabled
= 0; // used to find placement for rank2rank sharing configs
1081 #endif // BACKUP_RCVN
1082 #endif // R2R_SHARING
1086 uint32_t tempD
; // temporary DWORD
1087 uint32_t delay
[NUM_BYTE_LANES
]; // absolute PI value to be programmed on the byte lane
1090 #endif // BACKUP_RCVN
1094 post_code(0x05, 0x00);
1097 // need separate burst to sample DQS preamble
1098 dtr1
.raw
= dtr1save
.raw
= isbR32m(MCU
, DTR1
);
1099 dtr1
.field
.tCCD
= 1;
1100 isbW32m(MCU
, DTR1
, dtr1
.raw
);
1104 // need to set "final_delay[][]" elements to "0"
1105 memset((void *) (final_delay
), 0x00, (size_t) sizeof(final_delay
));
1106 #endif // R2R_SHARING
1108 // loop through each enabled channel
1109 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1111 if (mrc_params
->channel_enables
& (1 << channel_i
))
1113 // perform RCVEN Calibration on a per rank basis
1114 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1116 if (mrc_params
->rank_enables
& (1 << rank_i
))
1118 // POST_CODE here indicates the current channel and rank being calibrated
1119 post_code(0x05, (0x10 + ((channel_i
<< 4) | rank_i
)));
1122 // set hard-coded timing values
1123 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1125 set_rcvn(channel_i
, rank_i
, bl_i
, ddr_rcvn
[PLATFORM_ID
]);
1129 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
+= 2)
1131 isbM32m(DDRPHY
, (B01PTRCTL1
+ ((bl_i
>> 1) * DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), 0,
1132 BIT8
); // 0 is enabled
1134 // initialise the starting delay to 128 PI (tCAS +1 CLK)
1135 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1138 // Original value was late at the end of DQS sequence
1139 delay
[bl_i
] = 3 * FULL_CLK
;
1141 delay
[bl_i
] = (4 + 1) * FULL_CLK
; // 1x CLK domain timing is tCAS-4
1144 set_rcvn(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1147 // now find the rising edge
1148 find_rising_edge(mrc_params
, delay
, channel_i
, rank_i
, true);
1149 // Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse.
1150 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1152 delay
[bl_i
] += QRTR_CLK
;
1153 set_rcvn(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1155 // Now decrement delay by 128 PI (1 CLK) until we sample a "0"
1159 tempD
= sample_dqs(mrc_params
, channel_i
, rank_i
, true);
1160 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1162 if (tempD
& (1 << bl_i
))
1164 if (delay
[bl_i
] >= FULL_CLK
)
1166 delay
[bl_i
] -= FULL_CLK
;
1167 set_rcvn(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1172 training_message(channel_i
, rank_i
, bl_i
);
1173 post_code(0xEE, 0x50);
1177 } while (tempD
& 0xFF);
1180 // increment "num_ranks_enabled"
1181 num_ranks_enabled
++;
1182 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
1183 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1185 delay
[bl_i
] += QRTR_CLK
;
1186 // add "delay[]" values to "final_delay[][]" for rolling average
1187 final_delay
[channel_i
][bl_i
] += delay
[bl_i
];
1188 // set timing based on rolling average values
1189 set_rcvn(channel_i
, rank_i
, bl_i
, ((final_delay
[channel_i
][bl_i
]) / num_ranks_enabled
));
1192 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
1193 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1195 delay
[bl_i
] += QRTR_CLK
;
1196 set_rcvn(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1199 #endif // R2R_SHARING
1202 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
+= 2)
1204 isbM32m(DDRPHY
, (B01PTRCTL1
+ ((bl_i
>> 1) * DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)), BIT8
,
1205 BIT8
); // 1 is disabled
1208 #endif // BACKUP_RCVN
1210 } // if rank is enabled
1212 } // if channel is enabled
1217 isbW32m(MCU
, DTR1
, dtr1save
.raw
);
1221 if (mrc_params
->tune_rcvn
)
1223 uint32_t rcven
, val
;
1224 uint32_t rdcmd2rcven
;
1227 Formulas for RDCMD2DATAVALID & DIFFAMP dynamic timings
1229 1. Set after RCVEN training
1231 //Tune RDCMD2DATAVALID
1234 MAX OF 2 RANKS : round up (rdcmd2rcven (rcven 1x) + 2x x 2 + PI/128) + 5
1236 //rdcmd2rcven x80/84[12:8]
1237 //rcven 2x x70[23:20] & [11:8]
1239 //Tune DIFFAMP Timings
1241 //diffampen launch x88[20:16] & [4:0] -- B01LATCTL1
1242 MIN OF 2 RANKS : round down (rcven 1x + 2x x 2 + PI/128) - 1
1244 //diffampen length x8C/x90 [13:8] -- B0ONDURCTL B1ONDURCTL
1245 MAX OF 2 RANKS : roundup (rcven 1x + 2x x 2 + PI/128) + 5
1248 2. need to do a fiforst after settings these values
1251 DPF(D_INFO
, "BEFORE\n");
1252 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B0LATCTL0
));
1253 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B01LATCTL1
));
1254 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B0ONDURCTL
));
1256 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B1LATCTL0
));
1257 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B1ONDURCTL
));
1259 rcven
= get_rcvn(0, 0, 0) / 128;
1260 rdcmd2rcven
= (isbR32m(DDRPHY
, B0LATCTL0
) >> 8) & 0x1F;
1261 val
= rdcmd2rcven
+ rcven
+ 6;
1262 isbM32m(DDRPHY
, B0LATCTL0
, val
<< 16, (BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
));
1264 val
= rdcmd2rcven
+ rcven
- 1;
1265 isbM32m(DDRPHY
, B01LATCTL1
, val
<< 0, (BIT4
|BIT3
|BIT2
|BIT1
|BIT0
));
1267 val
= rdcmd2rcven
+ rcven
+ 5;
1268 isbM32m(DDRPHY
, B0ONDURCTL
, val
<< 8, (BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
));
1270 rcven
= get_rcvn(0, 0, 1) / 128;
1271 rdcmd2rcven
= (isbR32m(DDRPHY
, B1LATCTL0
) >> 8) & 0x1F;
1272 val
= rdcmd2rcven
+ rcven
+ 6;
1273 isbM32m(DDRPHY
, B1LATCTL0
, val
<< 16, (BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
));
1275 val
= rdcmd2rcven
+ rcven
- 1;
1276 isbM32m(DDRPHY
, B01LATCTL1
, val
<< 16, (BIT20
|BIT19
|BIT18
|BIT17
|BIT16
));
1278 val
= rdcmd2rcven
+ rcven
+ 5;
1279 isbM32m(DDRPHY
, B1ONDURCTL
, val
<< 8, (BIT13
|BIT12
|BIT11
|BIT10
|BIT9
|BIT8
));
1281 DPF(D_INFO
, "AFTER\n");
1282 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B0LATCTL0
));
1283 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B01LATCTL1
));
1284 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B0ONDURCTL
));
1286 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B1LATCTL0
));
1287 DPF(D_INFO
, "### %x\n", isbR32m(DDRPHY
, B1ONDURCTL
));
1289 DPF(D_INFO
, "\nPress a key\n");
1293 isbM32m(DDRPHY
, B01PTRCTL1
, 0, BIT8
); // 0 is enabled
1295 isbM32m(DDRPHY
, B01PTRCTL1
, BIT8
, BIT8
); // 1 is disabled
1303 // Check memory executing write/read/verify of many data patterns
1304 // at the specified address. Bits in the result indicate failure
1305 // on specific byte lane.
1306 static uint32_t check_bls_ex(
1307 MRCParams_t
*mrc_params
,
1311 uint8_t first_run
= 0;
1313 if (mrc_params
->hte_setup
)
1315 mrc_params
->hte_setup
= 0;
1318 select_hte(mrc_params
);
1321 result
= WriteStressBitLanesHTE(mrc_params
, address
, first_run
);
1323 DPF(D_TRN
, "check_bls_ex result is %x\n", result
);
1327 // Check memory executing simple write/read/verify at
1328 // the specified address. Bits in the result indicate failure
1329 // on specific byte lane.
1330 static uint32_t check_rw_coarse(
1331 MRCParams_t
*mrc_params
,
1334 uint32_t result
= 0;
1335 uint8_t first_run
= 0;
1337 if (mrc_params
->hte_setup
)
1339 mrc_params
->hte_setup
= 0;
1342 select_hte(mrc_params
);
1345 result
= BasicWriteReadHTE(mrc_params
, address
, first_run
, WRITE_TRAIN
);
1347 DPF(D_TRN
, "check_rw_coarse result is %x\n", result
);
1352 // POST_CODE[major] == 0x06
1354 // This function will perform the Write Levelling algorithm (align WCLK and WDQS).
1355 // This algorithm will act on each rank in each channel separately.
1356 static void wr_level(
1357 MRCParams_t
*mrc_params
)
1359 uint8_t channel_i
; // channel counter
1360 uint8_t rank_i
; // rank counter
1361 uint8_t bl_i
; // byte lane counter
1362 uint8_t bl_divisor
= (mrc_params
->channel_width
== x16
) ? 2 : 1; // byte lane divisor
1365 uint32_t final_delay
[NUM_CHANNELS
][NUM_BYTE_LANES
]; // used to find placement for rank2rank sharing configs
1367 uint32_t num_ranks_enabled
= 0; // used to find placement for rank2rank sharing configs
1368 #endif // BACKUP_WDQS
1369 #endif // R2R_SHARING
1373 bool all_edges_found
; // determines stop condition for CRS_WR_LVL
1374 uint32_t delay
[NUM_BYTE_LANES
]; // absolute PI value to be programmed on the byte lane
1375 // static makes it so the data is loaded in the heap once by shadow(), where
1376 // non-static copies the data onto the stack every time this function is called.
1378 uint32_t address
; // address to be checked during COARSE_WR_LVL
1381 #endif // BACKUP_WDQS
1386 post_code(0x06, 0x00);
1389 // need to set "final_delay[][]" elements to "0"
1390 memset((void *) (final_delay
), 0x00, (size_t) sizeof(final_delay
));
1391 #endif // R2R_SHARING
1392 // loop through each enabled channel
1393 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1395 if (mrc_params
->channel_enables
& (1 << channel_i
))
1397 // perform WRITE LEVELING algorithm on a per rank basis
1398 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1400 if (mrc_params
->rank_enables
& (1 << rank_i
))
1402 // POST_CODE here indicates the current rank and channel being calibrated
1403 post_code(0x06, (0x10 + ((channel_i
<< 4) | rank_i
)));
1406 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1408 set_wdqs(channel_i
, rank_i
, bl_i
, ddr_wdqs
[PLATFORM_ID
]);
1409 set_wdq(channel_i
, rank_i
, bl_i
, (ddr_wdqs
[PLATFORM_ID
] - QRTR_CLK
));
1413 { // Begin product specific code
1415 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state
1416 dram_init_command(DCMD_PREA(rank_i
));
1418 // enable Write Levelling Mode (EMRS1 w/ Write Levelling Mode Enable)
1419 dram_init_command(DCMD_MRS1(rank_i
,0x0082));
1421 // set ODT DRAM Full Time Termination disable in MCU
1422 dtr4
.raw
= dtr4save
.raw
= isbR32m(MCU
, DTR4
);
1423 dtr4
.field
.ODTDIS
= 1;
1424 isbW32m(MCU
, DTR4
, dtr4
.raw
);
1426 for (bl_i
= 0; bl_i
< ((NUM_BYTE_LANES
/ bl_divisor
) / 2); bl_i
++)
1428 isbM32m(DDRPHY
, DQCTL
+ (DDRIODQ_BL_OFFSET
* bl_i
) + (DDRIODQ_CH_OFFSET
* channel_i
),
1429 (BIT28
| (0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),
1430 (BIT28
| (BIT9
|BIT8
) | (BIT7
|BIT6
) | (BIT5
|BIT4
) | (BIT3
|BIT2
))); // Enable Sandy Bridge Mode (WDQ Tri-State) & Ensure 5 WDQS pulses during Write Leveling
1433 isbM32m(DDRPHY
, CCDDR3RESETCTL
+ (DDRIOCCC_CH_OFFSET
* channel_i
), (BIT16
), (BIT16
)); // Write Leveling Mode enabled in IO
1434 } // End product specific code
1435 // Initialise the starting delay to WCLK
1436 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1438 { // Begin product specific code
1441 delay
[bl_i
] = get_wclk(channel_i
, rank_i
);
1442 } // End product specific code
1443 set_wdqs(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1445 // now find the rising edge
1446 find_rising_edge(mrc_params
, delay
, channel_i
, rank_i
, false);
1447 { // Begin product specific code
1448 // disable Write Levelling Mode
1449 isbM32m(DDRPHY
, CCDDR3RESETCTL
+ (DDRIOCCC_CH_OFFSET
* channel_i
), (0), (BIT16
)); // Write Leveling Mode disabled in IO
1451 for (bl_i
= 0; bl_i
< ((NUM_BYTE_LANES
/ bl_divisor
) / 2); bl_i
++)
1453 isbM32m(DDRPHY
, DQCTL
+ (DDRIODQ_BL_OFFSET
* bl_i
) + (DDRIODQ_CH_OFFSET
* channel_i
),
1454 ((0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),
1455 (BIT28
| (BIT9
|BIT8
) | (BIT7
|BIT6
) | (BIT5
|BIT4
) | (BIT3
|BIT2
))); // Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation
1458 // restore original DTR4
1459 isbW32m(MCU
, DTR4
, dtr4save
.raw
);
1461 // restore original value (Write Levelling Mode Disable)
1462 dram_init_command(DCMD_MRS1(rank_i
, mrc_params
->mrs1
));
1464 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state
1465 dram_init_command(DCMD_PREA(rank_i
));
1466 } // End product specific code
1468 post_code(0x06, (0x30 + ((channel_i
<< 4) | rank_i
)));
1470 // COARSE WRITE LEVEL:
1471 // check that we're on the correct clock edge
1473 // hte reconfiguration request
1474 mrc_params
->hte_setup
= 1;
1476 // start CRS_WR_LVL with WDQS = WDQS + 128 PI
1477 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1479 delay
[bl_i
] = get_wdqs(channel_i
, rank_i
, bl_i
) + FULL_CLK
;
1480 set_wdqs(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1481 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
1482 set_wdq(channel_i
, rank_i
, bl_i
, (delay
[bl_i
] - QRTR_CLK
));
1485 // get an address in the targeted channel/rank
1486 address
= get_addr(mrc_params
, channel_i
, rank_i
);
1489 uint32_t coarse_result
= 0x00;
1490 uint32_t coarse_result_mask
= byte_lane_mask(mrc_params
);
1491 all_edges_found
= true; // assume pass
1494 // need restore memory to idle state as write can be in bad sync
1495 dram_init_command (DCMD_PREA(rank_i
));
1498 mrc_params
->hte_setup
= 1;
1499 coarse_result
= check_rw_coarse(mrc_params
, address
);
1501 // check for failures and margin the byte lane back 128 PI (1 CLK)
1502 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1504 if (coarse_result
& (coarse_result_mask
<< bl_i
))
1506 all_edges_found
= false;
1507 delay
[bl_i
] -= FULL_CLK
;
1508 set_wdqs(channel_i
, rank_i
, bl_i
, delay
[bl_i
]);
1509 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
1510 set_wdq(channel_i
, rank_i
, bl_i
, (delay
[bl_i
] - QRTR_CLK
));
1514 } while (!all_edges_found
);
1517 // increment "num_ranks_enabled"
1518 num_ranks_enabled
++;
1519 // accumulate "final_delay[][]" values from "delay[]" values for rolling average
1520 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1522 final_delay
[channel_i
][bl_i
] += delay
[bl_i
];
1523 set_wdqs(channel_i
, rank_i
, bl_i
, ((final_delay
[channel_i
][bl_i
]) / num_ranks_enabled
));
1524 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
1525 set_wdq(channel_i
, rank_i
, bl_i
, ((final_delay
[channel_i
][bl_i
]) / num_ranks_enabled
) - QRTR_CLK
);
1527 #endif // R2R_SHARING
1528 #endif // BACKUP_WDQS
1530 } // if rank is enabled
1532 } // if channel is enabled
1540 // POST_CODE[major] == 0x07
1542 // This function will perform the READ TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
1543 // The idea here is to train the VREF and RDQS (and eventually RDQ) values to achieve maximum READ margins.
1544 // The algorithm will first determine the X coordinate (RDQS setting).
1545 // This is done by collapsing the VREF eye until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
1546 // Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX, then average those; this will be the final X coordinate.
1547 // The algorithm will then determine the Y coordinate (VREF setting).
1548 // This is done by collapsing the RDQS eye until we find a minimum required VREF eye for RDQS_MIN and RDQS_MAX.
1549 // Then we take the averages of the VREF eye at RDQS_MIN and RDQS_MAX, then average those; this will be the final Y coordinate.
1550 // NOTE: this algorithm assumes the eye curves have a one-to-one relationship, meaning for each X the curve has only one Y and vice-a-versa.
1551 static void rd_train(
1552 MRCParams_t
*mrc_params
)
1555 #define MIN_RDQS_EYE 10 // in PI Codes
1556 #define MIN_VREF_EYE 10 // in VREF Codes
1557 #define RDQS_STEP 1 // how many RDQS codes to jump while margining
1558 #define VREF_STEP 1 // how many VREF codes to jump while margining
1559 #define VREF_MIN (0x00) // offset into "vref_codes[]" for minimum allowed VREF setting
1560 #define VREF_MAX (0x3F) // offset into "vref_codes[]" for maximum allowed VREF setting
1561 #define RDQS_MIN (0x00) // minimum RDQS delay value
1562 #define RDQS_MAX (0x3F) // maximum RDQS delay value
1563 #define B 0 // BOTTOM VREF
1564 #define T 1 // TOP VREF
1565 #define L 0 // LEFT RDQS
1566 #define R 1 // RIGHT RDQS
1568 uint8_t channel_i
; // channel counter
1569 uint8_t rank_i
; // rank counter
1570 uint8_t bl_i
; // byte lane counter
1571 uint8_t bl_divisor
= (mrc_params
->channel_width
== x16
) ? 2 : 1; // byte lane divisor
1574 uint8_t side_x
; // tracks LEFT/RIGHT approach vectors
1575 uint8_t side_y
; // tracks BOTTOM/TOP approach vectors
1576 uint8_t x_coordinate
[2/*side_x*/][2/*side_y*/][NUM_CHANNELS
][NUM_RANKS
][NUM_BYTE_LANES
]; // X coordinate data (passing RDQS values) for approach vectors
1577 uint8_t y_coordinate
[2/*side_x*/][2/*side_y*/][NUM_CHANNELS
][NUM_BYTE_LANES
]; // Y coordinate data (passing VREF values) for approach vectors
1578 uint8_t x_center
[NUM_CHANNELS
][NUM_RANKS
][NUM_BYTE_LANES
]; // centered X (RDQS)
1579 uint8_t y_center
[NUM_CHANNELS
][NUM_BYTE_LANES
]; // centered Y (VREF)
1580 uint32_t address
; // target address for "check_bls_ex()"
1581 uint32_t result
; // result of "check_bls_ex()"
1582 uint32_t bl_mask
; // byte lane mask for "result" checking
1584 uint32_t final_delay
[NUM_CHANNELS
][NUM_BYTE_LANES
]; // used to find placement for rank2rank sharing configs
1585 uint32_t num_ranks_enabled
= 0; // used to find placement for rank2rank sharing configs
1586 #endif // R2R_SHARING
1587 #endif // BACKUP_RDQS
1589 post_code(0x07, 0x00);
1594 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++)
1596 if (mrc_params
->channel_enables
& (1<<channel_i
))
1598 for (rank_i
=0; rank_i
<NUM_RANKS
; rank_i
++)
1600 if (mrc_params
->rank_enables
& (1<<rank_i
))
1602 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1604 set_rdqs(channel_i
, rank_i
, bl_i
, ddr_rdqs
[PLATFORM_ID
]);
1606 } // if rank is enabled
1608 } // if channel is enabled
1611 // initialise x/y_coordinate arrays
1612 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1614 if (mrc_params
->channel_enables
& (1 << channel_i
))
1616 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1618 if (mrc_params
->rank_enables
& (1 << rank_i
))
1620 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1623 x_coordinate
[L
][B
][channel_i
][rank_i
][bl_i
] = RDQS_MIN
;
1624 x_coordinate
[R
][B
][channel_i
][rank_i
][bl_i
] = RDQS_MAX
;
1625 x_coordinate
[L
][T
][channel_i
][rank_i
][bl_i
] = RDQS_MIN
;
1626 x_coordinate
[R
][T
][channel_i
][rank_i
][bl_i
] = RDQS_MAX
;
1628 y_coordinate
[L
][B
][channel_i
][bl_i
] = VREF_MIN
;
1629 y_coordinate
[R
][B
][channel_i
][bl_i
] = VREF_MIN
;
1630 y_coordinate
[L
][T
][channel_i
][bl_i
] = VREF_MAX
;
1631 y_coordinate
[R
][T
][channel_i
][bl_i
] = VREF_MAX
;
1633 } // if rank is enabled
1635 } // if channel is enabled
1638 // initialise other variables
1639 bl_mask
= byte_lane_mask(mrc_params
);
1640 address
= get_addr(mrc_params
, 0, 0);
1643 // need to set "final_delay[][]" elements to "0"
1644 memset((void *) (final_delay
), 0x00, (size_t) sizeof(final_delay
));
1645 #endif // R2R_SHARING
1647 // look for passing coordinates
1648 for (side_y
= B
; side_y
<= T
; side_y
++)
1650 for (side_x
= L
; side_x
<= R
; side_x
++)
1653 post_code(0x07, (0x10 + (side_y
* 2) + (side_x
)));
1655 // find passing values
1656 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1658 if (mrc_params
->channel_enables
& (0x1 << channel_i
))
1660 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1663 if (mrc_params
->rank_enables
& (0x1 << rank_i
))
1665 // set x/y_coordinate search starting settings
1666 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1668 set_rdqs(channel_i
, rank_i
, bl_i
, x_coordinate
[side_x
][side_y
][channel_i
][rank_i
][bl_i
]);
1669 set_vref(channel_i
, bl_i
, y_coordinate
[side_x
][side_y
][channel_i
][bl_i
]);
1671 // get an address in the target channel/rank
1672 address
= get_addr(mrc_params
, channel_i
, rank_i
);
1674 // request HTE reconfiguration
1675 mrc_params
->hte_setup
= 1;
1677 // test the settings
1681 // result[07:00] == failing byte lane (MAX 8)
1682 result
= check_bls_ex( mrc_params
, address
);
1684 // check for failures
1687 // at least 1 byte lane failed
1688 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1690 if (result
& (bl_mask
<< bl_i
))
1692 // adjust the RDQS values accordingly
1695 x_coordinate
[L
][side_y
][channel_i
][rank_i
][bl_i
] += RDQS_STEP
;
1699 x_coordinate
[R
][side_y
][channel_i
][rank_i
][bl_i
] -= RDQS_STEP
;
1701 // check that we haven't closed the RDQS_EYE too much
1702 if ((x_coordinate
[L
][side_y
][channel_i
][rank_i
][bl_i
] > (RDQS_MAX
- MIN_RDQS_EYE
)) ||
1703 (x_coordinate
[R
][side_y
][channel_i
][rank_i
][bl_i
] < (RDQS_MIN
+ MIN_RDQS_EYE
))
1705 (x_coordinate
[L
][side_y
][channel_i
][rank_i
][bl_i
]
1706 == x_coordinate
[R
][side_y
][channel_i
][rank_i
][bl_i
]))
1708 // not enough RDQS margin available at this VREF
1709 // update VREF values accordingly
1712 y_coordinate
[side_x
][B
][channel_i
][bl_i
] += VREF_STEP
;
1716 y_coordinate
[side_x
][T
][channel_i
][bl_i
] -= VREF_STEP
;
1718 // check that we haven't closed the VREF_EYE too much
1719 if ((y_coordinate
[side_x
][B
][channel_i
][bl_i
] > (VREF_MAX
- MIN_VREF_EYE
)) ||
1720 (y_coordinate
[side_x
][T
][channel_i
][bl_i
] < (VREF_MIN
+ MIN_VREF_EYE
)) ||
1721 (y_coordinate
[side_x
][B
][channel_i
][bl_i
] == y_coordinate
[side_x
][T
][channel_i
][bl_i
]))
1723 // VREF_EYE collapsed below MIN_VREF_EYE
1724 training_message(channel_i
, rank_i
, bl_i
);
1725 post_code(0xEE, (0x70 + (side_y
* 2) + (side_x
)));
1729 // update the VREF setting
1730 set_vref(channel_i
, bl_i
, y_coordinate
[side_x
][side_y
][channel_i
][bl_i
]);
1731 // reset the X coordinate to begin the search at the new VREF
1732 x_coordinate
[side_x
][side_y
][channel_i
][rank_i
][bl_i
] =
1733 (side_x
== L
) ? (RDQS_MIN
) : (RDQS_MAX
);
1736 // update the RDQS setting
1737 set_rdqs(channel_i
, rank_i
, bl_i
, x_coordinate
[side_x
][side_y
][channel_i
][rank_i
][bl_i
]);
1740 } // at least 1 byte lane failed
1741 } while (result
& 0xFF);
1742 } // if rank is enabled
1744 } // if channel is enabled
1749 post_code(0x07, 0x20);
1751 // find final RDQS (X coordinate) & final VREF (Y coordinate)
1752 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1754 if (mrc_params
->channel_enables
& (1 << channel_i
))
1756 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1758 if (mrc_params
->rank_enables
& (1 << rank_i
))
1760 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1766 DPF(D_INFO
, "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n", rank_i
, bl_i
,
1767 x_coordinate
[L
][T
][channel_i
][rank_i
][bl_i
],
1768 x_coordinate
[R
][T
][channel_i
][rank_i
][bl_i
],
1769 x_coordinate
[L
][B
][channel_i
][rank_i
][bl_i
],
1770 x_coordinate
[R
][B
][channel_i
][rank_i
][bl_i
]);
1772 tempD1
= (x_coordinate
[R
][T
][channel_i
][rank_i
][bl_i
] + x_coordinate
[L
][T
][channel_i
][rank_i
][bl_i
]) / 2; // average the TOP side LEFT & RIGHT values
1773 tempD2
= (x_coordinate
[R
][B
][channel_i
][rank_i
][bl_i
] + x_coordinate
[L
][B
][channel_i
][rank_i
][bl_i
]) / 2; // average the BOTTOM side LEFT & RIGHT values
1774 x_center
[channel_i
][rank_i
][bl_i
] = (uint8_t) ((tempD1
+ tempD2
) / 2); // average the above averages
1777 DPF(D_INFO
, "VREF R/L eye lane%d : %d-%d %d-%d\n", bl_i
,
1778 y_coordinate
[R
][B
][channel_i
][bl_i
],
1779 y_coordinate
[R
][T
][channel_i
][bl_i
],
1780 y_coordinate
[L
][B
][channel_i
][bl_i
],
1781 y_coordinate
[L
][T
][channel_i
][bl_i
]);
1783 tempD1
= (y_coordinate
[R
][T
][channel_i
][bl_i
] + y_coordinate
[R
][B
][channel_i
][bl_i
]) / 2; // average the RIGHT side TOP & BOTTOM values
1784 tempD2
= (y_coordinate
[L
][T
][channel_i
][bl_i
] + y_coordinate
[L
][B
][channel_i
][bl_i
]) / 2; // average the LEFT side TOP & BOTTOM values
1785 y_center
[channel_i
][bl_i
] = (uint8_t) ((tempD1
+ tempD2
) / 2); // average the above averages
1787 } // if rank is enabled
1789 } // if channel is enabled
1793 // perform an eye check
1794 for (side_y
=B
; side_y
<=T
; side_y
++)
1796 for (side_x
=L
; side_x
<=R
; side_x
++)
1799 post_code(0x07, (0x30 + (side_y
* 2) + (side_x
)));
1801 // update the settings for the eye check
1802 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++)
1804 if (mrc_params
->channel_enables
& (1<<channel_i
))
1806 for (rank_i
=0; rank_i
<NUM_RANKS
; rank_i
++)
1808 if (mrc_params
->rank_enables
& (1<<rank_i
))
1810 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1814 set_rdqs(channel_i
, rank_i
, bl_i
, (x_center
[channel_i
][rank_i
][bl_i
] - (MIN_RDQS_EYE
/ 2)));
1818 set_rdqs(channel_i
, rank_i
, bl_i
, (x_center
[channel_i
][rank_i
][bl_i
] + (MIN_RDQS_EYE
/ 2)));
1822 set_vref(channel_i
, bl_i
, (y_center
[channel_i
][bl_i
] - (MIN_VREF_EYE
/ 2)));
1826 set_vref(channel_i
, bl_i
, (y_center
[channel_i
][bl_i
] + (MIN_VREF_EYE
/ 2)));
1829 } // if rank is enabled
1831 } // if channel is enabled
1834 // request HTE reconfiguration
1835 mrc_params
->hte_setup
= 1;
1838 if (check_bls_ex( mrc_params
, address
) & 0xFF)
1840 // one or more byte lanes failed
1841 post_code(0xEE, (0x74 + (side_x
* 2) + (side_y
)));
1845 #endif // RX_EYE_CHECK
1847 post_code(0x07, 0x40);
1849 // set final placements
1850 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1852 if (mrc_params
->channel_enables
& (1 << channel_i
))
1854 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1856 if (mrc_params
->rank_enables
& (1 << rank_i
))
1859 // increment "num_ranks_enabled"
1860 num_ranks_enabled
++;
1861 #endif // R2R_SHARING
1862 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1866 final_delay
[channel_i
][bl_i
] += x_center
[channel_i
][rank_i
][bl_i
];
1867 set_rdqs(channel_i
, rank_i
, bl_i
, ((final_delay
[channel_i
][bl_i
]) / num_ranks_enabled
));
1869 set_rdqs(channel_i
, rank_i
, bl_i
, x_center
[channel_i
][rank_i
][bl_i
]);
1870 #endif // R2R_SHARING
1872 set_vref(channel_i
, bl_i
, y_center
[channel_i
][bl_i
]);
1874 } // if rank is enabled
1876 } // if channel is enabled
1878 #endif // BACKUP_RDQS
// NOTE(review): this region was mangled during extraction -- the original
// braces and preprocessor structure (#ifdef BACKUP_WDQ, #ifdef R2R_SHARING,
// the "do {" loop opener, #else branches) are missing. Code is kept
// byte-identical below; only comments are added. Reconcile against the
// original meminit.c before editing.
1884 // POST_CODE[major] == 0x08
1886 // This function will perform the WRITE TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
1887 // The idea here is to train the WDQ timings to achieve maximum WRITE margins.
1888 // The algorithm will start with WDQ at the current WDQ setting (tracks WDQS in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data patterns pass.
1889 // This is because WDQS will be aligned to WCLK by the Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window of validity.
1890 static void wr_train(
1891 MRCParams_t
*mrc_params
)
1894 #define WDQ_STEP 1 // how many WDQ codes to jump while margining
1895 #define L 0 // LEFT side loop value definition
1896 #define R 1 // RIGHT side loop value definition
1898 uint8_t channel_i
; // channel counter
1899 uint8_t rank_i
; // rank counter
1900 uint8_t bl_i
; // byte lane counter
1901 uint8_t bl_divisor
= (mrc_params
->channel_width
== x16
) ? 2 : 1; // byte lane divisor
1904 uint8_t side_i
; // LEFT/RIGHT side indicator (0=L, 1=R)
1905 uint32_t tempD
; // temporary DWORD
1906 uint32_t delay
[2/*side_i*/][NUM_CHANNELS
][NUM_RANKS
][NUM_BYTE_LANES
]; // 2 arrays, for L & R side passing delays
1907 uint32_t address
; // target address for "check_bls_ex()"
1908 uint32_t result
; // result of "check_bls_ex()"
1909 uint32_t bl_mask
; // byte lane mask for "result" checking
// The two variables below serve only the rank-to-rank sharing build; the
// opening #ifdef R2R_SHARING was lost in extraction (its #endif survives at
// original line 1913).
1911 uint32_t final_delay
[NUM_CHANNELS
][NUM_BYTE_LANES
]; // used to find placement for rank2rank sharing configs
1912 uint32_t num_ranks_enabled
= 0; // used to find placement for rank2rank sharing configs
1913 #endif // R2R_SHARING
1914 #endif // BACKUP_WDQ
1917 post_code(0x08, 0x00);
// NOTE(review): the loop below forces every enabled lane's WDQ to the static
// per-platform ddr_wdq[] value -- presumably the BACKUP_WDQ path; the
// guarding #ifdef/#else lines are missing here. TODO confirm.
1922 for (channel_i
=0; channel_i
<NUM_CHANNELS
; channel_i
++)
1924 if (mrc_params
->channel_enables
& (1<<channel_i
))
1926 for (rank_i
=0; rank_i
<NUM_RANKS
; rank_i
++)
1928 if (mrc_params
->rank_enables
& (1<<rank_i
))
1930 for (bl_i
=0; bl_i
<(NUM_BYTE_LANES
/bl_divisor
); bl_i
++)
1932 set_wdq(channel_i
, rank_i
, bl_i
, ddr_wdq
[PLATFORM_ID
]);
1934 } // if rank is enabled
1936 } // if channel is enabled
1939 // initialise "delay"
1940 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1942 if (mrc_params
->channel_enables
& (1 << channel_i
))
1944 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1946 if (mrc_params
->rank_enables
& (1 << rank_i
))
1948 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1950 // want to start with WDQ = (WDQS - QRTR_CLK) +/- QRTR_CLK
1951 tempD
= get_wdqs(channel_i
, rank_i
, bl_i
) - QRTR_CLK
;
1952 delay
[L
][channel_i
][rank_i
][bl_i
] = tempD
- QRTR_CLK
;
1953 delay
[R
][channel_i
][rank_i
][bl_i
] = tempD
+ QRTR_CLK
;
1955 } // if rank is enabled
1957 } // if channel is enabled
1960 // initialise other variables
1961 bl_mask
= byte_lane_mask(mrc_params
);
1962 address
= get_addr(mrc_params
, 0, 0);
1965 // need to set "final_delay[][]" elements to "0"
1966 memset((void *) (final_delay
), 0x00, (size_t) sizeof(final_delay
));
1967 #endif // R2R_SHARING
1969 // start algorithm on the LEFT side and train each channel/bl until no failures are observed, then repeat for the RIGHT side.
1970 for (side_i
= L
; side_i
<= R
; side_i
++)
1972 post_code(0x08, (0x10 + (side_i
)));
1974 // set starting values
1975 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1977 if (mrc_params
->channel_enables
& (1 << channel_i
))
1979 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1981 if (mrc_params
->rank_enables
& (1 << rank_i
))
1983 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
1985 set_wdq(channel_i
, rank_i
, bl_i
, delay
[side_i
][channel_i
][rank_i
][bl_i
]);
1987 } // if rank is enabled
1989 } // if channel is enabled
1992 // find passing values
1993 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
1995 if (mrc_params
->channel_enables
& (0x1 << channel_i
))
1997 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
1999 if (mrc_params
->rank_enables
& (0x1 << rank_i
))
2001 // get an address in the target channel/rank
2002 address
= get_addr(mrc_params
, channel_i
, rank_i
);
2004 // request HTE reconfiguration
2005 mrc_params
->hte_setup
= 1;
2007 // check the settings
// NOTE(review): the "do {" that opens the retry loop was lost in
// extraction; it is closed by the "} while (result & 0xFF)" at original
// line 2049 below.
2012 // need restore memory to idle state as write can be in bad sync
2013 dram_init_command (DCMD_PREA(rank_i
));
2016 // result[07:00] == failing byte lane (MAX 8)
2017 result
= check_bls_ex( mrc_params
, address
);
2018 // check for failures
2021 // at least 1 byte lane failed
2022 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
2024 if (result
& (bl_mask
<< bl_i
))
// LEFT side narrows the eye upward, RIGHT side downward; the
// if/else that selects between the two adjustments based on
// side_i was lost in extraction -- TODO confirm.
2028 delay
[L
][channel_i
][rank_i
][bl_i
] += WDQ_STEP
;
2032 delay
[R
][channel_i
][rank_i
][bl_i
] -= WDQ_STEP
;
2034 // check for algorithm failure
2035 if (delay
[L
][channel_i
][rank_i
][bl_i
] != delay
[R
][channel_i
][rank_i
][bl_i
])
2037 // margin available, update delay setting
2038 set_wdq(channel_i
, rank_i
, bl_i
, delay
[side_i
][channel_i
][rank_i
][bl_i
]);
2042 // no margin available, notify the user and halt
2043 training_message(channel_i
, rank_i
, bl_i
);
2044 post_code(0xEE, (0x80 + side_i
));
2048 } // at least 1 byte lane failed
2049 } while (result
& 0xFF); // stop when all byte lanes pass
2050 } // if rank is enabled
2052 } // if channel is enabled
2056 // program WDQ to the middle of passing window
2057 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
2059 if (mrc_params
->channel_enables
& (1 << channel_i
))
2061 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
2063 if (mrc_params
->rank_enables
& (1 << rank_i
))
2066 // increment "num_ranks_enabled"
2067 num_ranks_enabled
++;
2068 #endif // R2R_SHARING
2069 for (bl_i
= 0; bl_i
< (NUM_BYTE_LANES
/ bl_divisor
); bl_i
++)
2072 DPF(D_INFO
, "WDQ eye rank%d lane%d : %d-%d\n", rank_i
, bl_i
,
2073 delay
[L
][channel_i
][rank_i
][bl_i
],
2074 delay
[R
][channel_i
][rank_i
][bl_i
]);
// tempD = midpoint of the passing L/R window for this lane.
2076 tempD
= (delay
[R
][channel_i
][rank_i
][bl_i
] + delay
[L
][channel_i
][rank_i
][bl_i
]) / 2;
// R2R_SHARING path: accumulate per-rank centers and program the running
// average; the non-sharing path at original line 2082 programs the
// per-rank center directly. The #ifdef/#else selecting between them was
// lost in extraction (the #endif survives at line 2083).
2079 final_delay
[channel_i
][bl_i
] += tempD
;
2080 set_wdq(channel_i
, rank_i
, bl_i
, ((final_delay
[channel_i
][bl_i
]) / num_ranks_enabled
));
2082 set_wdq(channel_i
, rank_i
, bl_i
, tempD
);
2083 #endif // R2R_SHARING
2086 } // if rank is enabled
2088 } // if channel is enabled
2090 #endif // BACKUP_WDQ
2095 // Wrapper for jedec initialisation routine
2096 static void perform_jedec_init(
2097 MRCParams_t
*mrc_params
)
// Delegates to jedec_init() with a second argument of 0; the meaning of that
// argument is not visible in this chunk -- see jedec_init() elsewhere in the file.
2099 jedec_init(mrc_params
, 0);
2102 // Configure DDRPHY for Auto-Refresh, Periodic Compensations,
2103 // Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
2104 static void set_auto_refresh(
2105 MRCParams_t
*mrc_params
)
// NOTE(review): local declarations (channel_i, rank_i, bl_i, tempD) were lost
// in extraction; only comments are added below.
2110 uint32_t bl_divisor
= /*(mrc_params->channel_width==x16)?2:*/1;
2115 // enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
2116 for (channel_i
= 0; channel_i
< NUM_CHANNELS
; channel_i
++)
2118 if (mrc_params
->channel_enables
& (1 << channel_i
))
2120 // Enable Periodic RCOMPS
2121 isbM32m(DDRPHY
, CMPCTRL
, (BIT1
), (BIT1
));
2124 // Enable Dynamic DiffAmp & Set Read ODT Value
// tempD feeds bits [15:10] of the per-byte-lane override registers below
// (see the <<10 shift and BIT15..BIT10 mask): 0x3F when read ODT is
// forced OFF, 0x00 for automatic control.
2125 switch (mrc_params
->rd_odt_value
)
2127 case 0: tempD
= 0x3F; break; // OFF
2128 default: tempD
= 0x00; break; // Auto
2129 } // rd_odt_value switch
// Each iteration programs a pair of byte lanes (B0/B1 override registers),
// hence the extra /2 in the loop bound.
2131 for (bl_i
=0; bl_i
<((NUM_BYTE_LANES
/bl_divisor
)/2); bl_i
++)
2133 isbM32m(DDRPHY
, (B0OVRCTL
+ (bl_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)),
2134 ((0x00<<16)|(tempD
<<10)),
2135 ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
))); // Override: DIFFAMP, ODT
2137 isbM32m(DDRPHY
, (B1OVRCTL
+ (bl_i
* DDRIODQ_BL_OFFSET
) + (channel_i
* DDRIODQ_CH_OFFSET
)),
2138 ((0x00<<16)|(tempD
<<10)),
2139 ((BIT21
|BIT20
|BIT19
|BIT18
|BIT17
|BIT16
)|(BIT15
|BIT14
|BIT13
|BIT12
|BIT11
|BIT10
)));// Override: DIFFAMP, ODT
2142 // Issue ZQCS command
2143 for (rank_i
= 0; rank_i
< NUM_RANKS
; rank_i
++)
2145 if (mrc_params
->rank_enables
& (1 << rank_i
))
2147 dram_init_command(DCMD_ZQCS(rank_i
));
2148 } // if rank_i enabled
2151 } // if channel_i enabled
2160 // Depending on configuration enables ECC support.
2161 // Available memory size is decreased, and updated with 0s
2162 // in order to clear error status. Address mode 2 forced.
2163 static void ecc_enable(
2164 MRCParams_t
*mrc_params
)
// Early-out: nothing to do when ECC is not requested.
2170 if (mrc_params
->ecc_enables
== 0) return;
// NOTE(review): the declarations of the register unions Drp, Dsch and Ctr
// were lost in extraction; only comments are added below.
2174 // Configuration required in ECC mode
2175 Drp
.raw
= isbR32m(MCU
, DRP
);
2176 Drp
.field
.addressMap
= 2;
2177 Drp
.field
.split64
= 1;
2178 isbW32m(MCU
, DRP
, Drp
.raw
);
2180 // Disable new request bypass
2181 Dsch
.raw
= isbR32m(MCU
, DSCH
);
2182 Dsch
.field
.NEWBYPDIS
= 1;
2183 isbW32m(MCU
, DSCH
, Dsch
.raw
);
// NOTE(review): the line initialising Ctr.raw before these field writes
// (original line ~2186) was lost in extraction -- TODO confirm against the
// original source.
2187 Ctr
.field
.SBEEN
= 1;
2188 Ctr
.field
.DBEEN
= 1;
2189 Ctr
.field
.ENCBGEN
= 1;
2190 isbW32m(MCU
, DECCCTRL
, Ctr
.raw
);
2193 // Read back to be sure writing took place
2194 Ctr
.raw
= isbR32m(MCU
, DECCCTRL
);
2197 // Assume 8 bank memory, one bank is gone for ECC
2198 mrc_params
->mem_size
-= mrc_params
->mem_size
/ 8;
2200 // For S3 resume memory content has to be preserved
2201 if (mrc_params
->boot_mode
!= bmS3
)
// Zero-fill memory through the HTE so ECC check bits start consistent,
// then hand the PRI interface back to the MEMORY_MANAGER.
2203 select_hte(mrc_params
);
2204 HteMemInit(mrc_params
, MrcMemInit
, MrcHaltHteEngineOnError
);
2205 select_memory_manager(mrc_params
);
2212 // Lock MCU registers at the end of initialisation sequence.
2213 static void lock_registers(
2214 MRCParams_t
*mrc_params
)
// NOTE(review): the declaration of the Dco register union was lost in
// extraction; only comments are added below.
// Read-modify-write DCO: keep PRI enabled and owned by MEMORY_MANAGER,
// then set the DRP and REUT lock bits so training registers can no longer
// be modified.
2220 Dco
.raw
= isbR32m(MCU
, DCO
);
2221 Dco
.field
.PMIDIS
= 0; //0 - PRI enabled
2222 Dco
.field
.PMICTL
= 0; //0 - PRI owned by MEMORY_MANAGER
2223 Dco
.field
.DRPLOCK
= 1;
2224 Dco
.field
.REUTLOCK
= 1;
2225 isbW32m(MCU
, DCO
, Dco
.raw
);
2233 // cache write back invalidate
// NOTE(review): the function body (the wbinvd instruction, presumably inline
// asm per the SIM/GCC conditional) was lost in extraction.
2234 static void asm_wbinvd(void)
2236 #if defined (SIM) || defined (GCC)
// Cache invalidate (no write-back). NOTE(review): the function body was lost
// in extraction; only the SIM/GCC conditional survives.
2246 static void asm_invd(void)
2248 #if defined (SIM) || defined (GCC)
// Debug helper: read memory with the CPU and dump it 4 DWORDs per line.
2258 static void cpu_read(void)
2260 uint32_t adr
, dat
, limit
;
// NOTE(review): the initialisation of "limit" (original lines ~2261-2264)
// was lost in extraction -- TODO confirm its value against the original.
2265 for (adr
= 0; adr
< limit
; adr
+= 4)
2267 dat
= *(uint32_t*) adr
;
// Start a new output line with the address every 16 bytes.
2268 if ((adr
& 0x0F) == 0)
2270 DPF(D_INFO
, "\n%x : ", adr
);
2272 DPF(D_INFO
, "%x ", dat
);
2276 DPF(D_INFO
, "CPU read done\n");
// Debug helper: fill memory with an address-derived pattern (0xDEAD0000 + adr)
// using plain CPU stores.
2280 static void cpu_write(void)
2282 uint32_t adr
, limit
;
// NOTE(review): the initialisation of "limit" (original lines ~2283-2284)
// was lost in extraction -- TODO confirm its value against the original.
2285 for (adr
= 0; adr
< limit
; adr
+= 4)
2287 *(uint32_t*) adr
= 0xDEAD0000 + adr
;
2292 DPF(D_INFO
, "CPU write done\n");
// CPU-driven memory test: 5 passes over [1MB, 256MB) with different data
// patterns (address, walking 1, walking 0, 0x5555AAAA, 0xAAAA5555), timing
// each write and read sweep with the TSC.
2296 static void cpu_memory_test(
2297 MRCParams_t
*mrc_params
)
2299 uint32_t result
= 0;
2300 uint32_t val
, dat
, adr
, adr0
, step
, limit
;
// NOTE(review): "my_tsc" is used below but its declaration is not visible in
// this chunk -- presumably a file-scope uint64_t; TODO confirm.
2307 adr0
= 1 * 1024 * 1024;
2308 limit
= 256 * 1024 * 1024;
2310 for (step
= 0; step
<= 4; step
++)
2312 DPF(D_INFO
, "Mem test step %d starting from %xh\n", step
, adr0
);
// ---- write sweep, timed ----
2314 my_tsc
= read_tsc();
2315 for (adr
= adr0
; adr
< limit
; adr
+= sizeof(uint32_t))
2317 if (step
== 0) dat
= adr
;
2318 else if (step
== 1) dat
= (1 << ((adr
>> 2) & 0x1f));
2319 else if (step
== 2) dat
= ~(1 << ((adr
>> 2) & 0x1f));
2320 else if (step
== 3) dat
= 0x5555AAAA;
2321 else if (step
== 4) dat
= 0xAAAA5555;
2323 *(uint32_t*) adr
= dat
;
2325 DPF(D_INFO
, "Write time %llXh\n", read_tsc() - my_tsc
);
// ---- read-back sweep, timed; regenerate the same expected pattern ----
2327 my_tsc
= read_tsc();
2328 for (adr
= adr0
; adr
< limit
; adr
+= sizeof(uint32_t))
2330 if (step
== 0) dat
= adr
;
2331 else if (step
== 1) dat
= (1 << ((adr
>> 2) & 0x1f));
2332 else if (step
== 2) dat
= ~(1 << ((adr
>> 2) & 0x1f));
2333 else if (step
== 3) dat
= 0x5555AAAA;
2334 else if (step
== 4) dat
= 0xAAAA5555;
2336 val
= *(uint32_t*) adr
;
// NOTE(review): the mismatch check (comparing val against dat and
// bumping "result", original lines ~2337-2341) was lost in extraction;
// only the mismatch-report DPF below survives.
2340 DPF(D_INFO
, "%x vs. %x@%x\n", dat
, val
, adr
);
2344 DPF(D_INFO
, "Read time %llXh\n", read_tsc() - my_tsc
);
2347 DPF( D_INFO
, "Memory test result %x\n", result
);
2353 // Execute memory test, if error detected it is
2354 // indicated in mrc_params->status.
2355 static void memory_test(
2356 MRCParams_t
*mrc_params
)
2358 uint32_t result
= 0;
// Route PRI to the HTE, run the hardware test engine over memory, then hand
// PRI back to the MEMORY_MANAGER.
2362 select_hte(mrc_params
);
2363 result
= HteMemInit(mrc_params
, MrcMemTest
, MrcHaltHteEngineOnError
);
2364 select_memory_manager(mrc_params
);
2366 DPF(D_INFO
, "Memory test result %x\n", result
);
// Non-zero HTE result maps to MRC_E_MEMTEST in the caller-visible status.
2367 mrc_params
->status
= ((result
== 0) ? MRC_SUCCESS
: MRC_E_MEMTEST
);
2372 // Force same timings as with backup settings
2373 static void static_timings(
2374 MRCParams_t
*mrc_params
)
// NOTE(review): the declarations of ch/rk/bl were lost in extraction; only
// comments are added below. The hard-coded delay codes match the backup
// (static) configuration referenced in the comment above.
2379 for (ch
= 0; ch
< NUM_CHANNELS
; ch
++)
2381 for (rk
= 0; rk
< NUM_RANKS
; rk
++)
2383 for (bl
= 0; bl
< NUM_BYTE_LANES
; bl
++)
2385 set_rcvn(ch
, rk
, bl
, 498); // RCVN
2386 set_rdqs(ch
, rk
, bl
, 24); // RDQS
2387 set_wdqs(ch
, rk
, bl
, 292); // WDQS
2388 set_wdq( ch
, rk
, bl
, 260); // WDQ
// NOTE(review): per the "(RANK0 only)" remark, a guard restricting this
// call to rk == 0 was presumably lost in extraction -- TODO confirm.
2391 set_vref(ch
, bl
, 32); // VREF (RANK0 only)
2394 set_wctl(ch
, rk
, 217); // WCTL
2396 set_wcmd(ch
, 220); // WCMD
2403 // Initialise system memory.
2406 MRCParams_t
*mrc_params
)
2408 static const MemInit_t init
[] =
2410 { 0x0101, bmCold
|bmFast
|bmWarm
|bmS3
, clear_self_refresh
}, //0
2411 { 0x0200, bmCold
|bmFast
|bmWarm
|bmS3
, prog_ddr_timing_control
}, //1 initialise the MCU
2412 { 0x0103, bmCold
|bmFast
, prog_decode_before_jedec
}, //2
2413 { 0x0104, bmCold
|bmFast
, perform_ddr_reset
}, //3
2414 { 0x0300, bmCold
|bmFast
|bmS3
, ddrphy_init
}, //4 initialise the DDRPHY
2415 { 0x0400, bmCold
|bmFast
, perform_jedec_init
}, //5 perform JEDEC initialisation of DRAMs
2416 { 0x0105, bmCold
|bmFast
, set_ddr_init_complete
}, //6
2417 { 0x0106, bmFast
|bmWarm
|bmS3
, restore_timings
}, //7
2418 { 0x0106, bmCold
, default_timings
}, //8
2419 { 0x0500, bmCold
, rcvn_cal
}, //9 perform RCVN_CAL algorithm
2420 { 0x0600, bmCold
, wr_level
}, //10 perform WR_LEVEL algorithm
2421 { 0x0120, bmCold
, prog_page_ctrl
}, //11
2422 { 0x0700, bmCold
, rd_train
}, //12 perform RD_TRAIN algorithm
2423 { 0x0800, bmCold
, wr_train
}, //13 perform WR_TRAIN algorithm
2424 { 0x010B, bmCold
, store_timings
}, //14
2425 { 0x010C, bmCold
|bmFast
|bmWarm
|bmS3
, enable_scrambling
}, //15
2426 { 0x010D, bmCold
|bmFast
|bmWarm
|bmS3
, prog_ddr_control
}, //16
2427 { 0x010E, bmCold
|bmFast
|bmWarm
|bmS3
, prog_dra_drb
}, //17
2428 { 0x010F, bmWarm
|bmS3
, perform_wake
}, //18
2429 { 0x0110, bmCold
|bmFast
|bmWarm
|bmS3
, change_refresh_period
}, //19
2430 { 0x0111, bmCold
|bmFast
|bmWarm
|bmS3
, set_auto_refresh
}, //20
2431 { 0x0112, bmCold
|bmFast
|bmWarm
|bmS3
, ecc_enable
}, //21
2432 { 0x0113, bmCold
|bmFast
, memory_test
}, //22
2433 { 0x0114, bmCold
|bmFast
|bmWarm
|bmS3
, lock_registers
} //23 set init done
2440 DPF(D_INFO
, "Meminit build %s %s\n", __DATE__
, __TIME__
);
2443 post_code(0x01, 0x00);
2445 if (mrc_params
->boot_mode
!= bmCold
)
2447 if (mrc_params
->ddr_speed
!= mrc_params
->timings
.ddr_speed
)
2449 // full training required as frequency changed
2450 mrc_params
->boot_mode
= bmCold
;
2454 for (i
= 0; i
< MCOUNT(init
); i
++)
2459 if (mrc_params
->menu_after_mrc
&& i
> 14)
2465 DPF(D_INFO
, "-- c - continue --\n");
2466 DPF(D_INFO
, "-- j - move to jedec init --\n");
2467 DPF(D_INFO
, "-- m - memory test --\n");
2468 DPF(D_INFO
, "-- r - cpu read --\n");
2469 DPF(D_INFO
, "-- w - cpu write --\n");
2470 DPF(D_INFO
, "-- b - hte base test --\n");
2471 DPF(D_INFO
, "-- g - hte extended test --\n");
2478 case 'j': //move to jedec init
2485 uint32_t n
, res
, cnt
=0;
2487 for(n
=0; mgetch()==0; n
++)
2489 if( ch
== 'M' || n
% 256 == 0)
2491 DPF(D_INFO
, "n=%d e=%d\n", n
, cnt
);
2498 memory_test(mrc_params
);
2499 res
|= mrc_params
->status
;
2502 mrc_params
->hte_setup
= 1;
2503 res
|= check_bls_ex(mrc_params
, 0x00000000);
2504 res
|= check_bls_ex(mrc_params
, 0x00000000);
2505 res
|= check_bls_ex(mrc_params
, 0x00000000);
2506 res
|= check_bls_ex(mrc_params
, 0x00000000);
2508 if( mrc_params
->rank_enables
& 2)
2510 mrc_params
->hte_setup
= 1;
2511 res
|= check_bls_ex(mrc_params
, 0x40000000);
2512 res
|= check_bls_ex(mrc_params
, 0x40000000);
2513 res
|= check_bls_ex(mrc_params
, 0x40000000);
2514 res
|= check_bls_ex(mrc_params
, 0x40000000);
2519 DPF(D_INFO
, "###########\n");
2521 DPF(D_INFO
, "# Error count %d\n", ++cnt
);
2523 DPF(D_INFO
, "###########\n");
2528 select_memory_manager(mrc_params
);
2532 memory_test(mrc_params
);
2535 cpu_memory_test(mrc_params
);
2540 if (ch
<= '9') DpfPrintMask
^= (ch
- '0') << 3;
2541 DPF(D_INFO
, "Log mask %x\n", DpfPrintMask
);
2544 print_timings(mrc_params
);
2547 rd_train(mrc_params
);
2550 wr_train(mrc_params
);
2563 select_hte(mrc_params
);
2564 mrc_params
->hte_setup
= 1;
2565 result
= check_bls_ex(mrc_params
, 0);
2566 DPF(D_INFO
, "Extended test result %x\n", result
);
2567 select_memory_manager(mrc_params
);
2573 select_hte(mrc_params
);
2574 mrc_params
->hte_setup
= 1;
2575 result
= check_rw_coarse(mrc_params
, 0);
2576 DPF(D_INFO
, "Base test result %x\n", result
);
2577 select_memory_manager(mrc_params
);
2581 select_hte(mrc_params
);
2582 HteMemOp(0x2340, 1, 1);
2583 select_memory_manager(mrc_params
);
2590 DPF( D_INFO
, "===>> Start suspend\n");
2591 isbR32m(MCU
, DSTAT
);
2593 DPMC0reg
.raw
= isbR32m(MCU
, DPMC0
);
2594 DPMC0reg
.field
.DYNSREN
= 0;
2595 DPMC0reg
.field
.powerModeOpCode
= 0x05; // Disable Master DLL
2596 isbW32m(MCU
, DPMC0
, DPMC0reg
.raw
);
2598 // Should be off for negative test case verification
2600 Wr32(MMIO
, PCIADDR(0,0,0,SB_PACKET_REG
),
2601 (uint32_t)SB_COMMAND(SB_SUSPEND_CMND_OPCODE
, MCU
, 0));
2604 DPF( D_INFO
, "press key\n");
2606 DPF( D_INFO
, "===>> Start resume\n");
2607 isbR32m(MCU
, DSTAT
);
2609 mrc_params
->boot_mode
= bmS3
;
2618 if (mrc_params
->boot_mode
& init
[i
].boot_path
)
2620 uint8_t major
= init
[i
].post_code
>> 8 & 0xFF;
2621 uint8_t minor
= init
[i
].post_code
>> 0 & 0xFF;
2622 post_code(major
, minor
);
2624 my_tsc
= read_tsc();
2625 init
[i
].init_fn(mrc_params
);
2626 DPF(D_TIME
, "Execution time %llX", read_tsc() - my_tsc
);
2630 // display the timings
2631 print_timings(mrc_params
);
2634 post_code(0x01, 0xFF);