+/************************************************************************\r
+ *\r
+ * Copyright (c) 2013-2015 Intel Corporation.\r
+ *\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+ *\r
+ * This file contains all of the Cat Mountain Memory Reference Code (MRC).\r
+ *\r
+ * These functions are generic and should work for any Cat Mountain config.\r
+ *\r
+ * MRC requires two data structures to be passed in which are initialised by "PreMemInit()".\r
+ *\r
+ * The basic flow is as follows:\r
+ * 01) Check for supported DDR speed configuration\r
+ * 02) Set up MEMORY_MANAGER buffer as pass-through (POR)\r
+ * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive setting possible\r
+ * 04) Set up the MCU logic\r
+ * 05) Set up the DDR_PHY logic\r
+ * 06) Initialise the DRAMs (JEDEC)\r
+ * 07) Perform the Receive Enable Calibration algorithm\r
+ * 08) Perform the Write Leveling algorithm\r
+ * 09) Perform the Read Training algorithm (includes internal Vref)\r
+ * 10) Perform the Write Training algorithm\r
+ * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings\r
+ *\r
+ * Dunit configuration based on Valleyview MRC.\r
+ *\r
+ ***************************************************************************/\r
+\r
+#include "mrc.h"\r
+#include "memory_options.h"\r
+\r
+#include "meminit.h"\r
+#include "meminit_utils.h"\r
+#include "hte.h"\r
+#include "io.h"\r
+\r
// Default DRMC value programmed after the WAKE command: if the caller asked
// for read ODT to be disabled (rd_odt_value == 0), set the override bit
// (BIT12); otherwise program 0.
// NOTE: this macro expands using the local 'mrc_params' pointer, so it is
// only valid inside a function whose parameter is MRCParams_t *mrc_params.
#define DRMC_DEFAULT (mrc_params->rd_odt_value==0?BIT12:0)
+\r
+\r
// Refresh cycle time (tRFC) in picoseconds, indexed by device density.
// Index matches the DRAMParams_t DENSITY encoding: 0=512Mb ... 4=8Gb.
const uint32_t tRFC[5] = {
  90000,   // 512Mb
  110000,  // 1Gb
  160000,  // 2Gb
  300000,  // 4Gb
  350000,  // 8Gb
};
+\r
// DRAM clock period (tCK) in picoseconds, indexed by speed grade:
// 0 = 800, 1 = 1066, 2 = 1333.
const uint32_t tCK[3] = {
  2500,  // 800
  1875,  // 1066
  1500,  // 1333
};
+\r
// PLATFORM_ID selects a column in the static timing tables below
// (ddr_wclk / ddr_wctl / ddr_wcmd and the BACKUP_* tables).
#ifdef SIM
// Select static timings specific to simulation environment
#define PLATFORM_ID 0
#else
// Select static timings specific to ClantonPeek platform
#define PLATFORM_ID 1
#endif
+\r
+\r
// Global variables
//
// Static delay tables, indexed by PLATFORM_ID (0 = SIM, 1 = ClantonPeek).
// The unconditional tables feed set_wclk/set_wctl/set_wcmd during PHY init;
// the BACKUP_* tables provide fixed values when the corresponding training
// step is compiled out.
const uint16_t ddr_wclk[] = { 193, 158 };

const uint16_t ddr_wctl[] = { 1, 217 };

const uint16_t ddr_wcmd[] = { 1, 220 };

#ifdef BACKUP_RCVN
const uint16_t ddr_rcvn[] = { 129, 498 };
#endif // BACKUP_RCVN

#ifdef BACKUP_WDQS
const uint16_t ddr_wdqs[] = { 65, 289 };
#endif // BACKUP_WDQS

#ifdef BACKUP_RDQS
const uint8_t ddr_rdqs[] = { 32, 24 };
#endif // BACKUP_RDQS

#ifdef BACKUP_WDQ
const uint16_t ddr_wdq[] = { 32, 257 };
#endif // BACKUP_WDQ
+\r
+\r
+\r
+// Select MEMORY_MANAGER as the source for PRI interface\r
+static void select_memory_manager(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ RegDCO Dco;\r
+\r
+ ENTERFN();\r
+\r
+ Dco.raw = isbR32m(MCU, DCO);\r
+ Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
+ isbW32m(MCU, DCO, Dco.raw);\r
+\r
+ LEAVEFN();\r
+}\r
+\r
+// Select HTE as the source for PRI interface\r
+void select_hte(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ RegDCO Dco;\r
+\r
+ ENTERFN();\r
+\r
+ Dco.raw = isbR32m(MCU, DCO);\r
+ Dco.field.PMICTL = 1; //1 - PRI owned by HTE\r
+ isbW32m(MCU, DCO, Dco.raw);\r
+\r
+ LEAVEFN();\r
+}\r
+\r
// Send a DRAM command through the MCU DCMD side-band register.
// 'data' should be formatted using a DCMD_Xxxx macro or an
// emrsXCommand structure before being passed in.
static void dram_init_command(
  uint32_t data)
{
  Wr32(DCMD, 0, data);
}
+\r
// Send the DRAM wake command using the special MCU side-band WAKE opcode.
// Writes a SB_WAKE_CMND_OPCODE packet for the MCU unit to the side-band
// packet register via MMIO/PCI config space.
static void dram_wake_command(
  void)
{
  ENTERFN();

  Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),
      (uint32_t) SB_COMMAND(SB_WAKE_CMND_OPCODE, MCU, 0));

  LEAVEFN();
}
+\r
// Stop self refresh driven by the MCU.
// Writes BIT0 of PMSTS; presumably the channel self-refresh status bit is
// write-1-to-clear — TODO confirm against the MCU register specification.
static void clear_self_refresh(
  MRCParams_t *mrc_params)
{
  ENTERFN();

  // clear the PMSTS Channel Self Refresh bits
  isbM32m(MCU, PMSTS, BIT0, BIT0);

  LEAVEFN();
}
+\r
// Configure the MCU before the JEDEC init sequence: quiesce power
// management, scheduling, refresh and ZQ calibration so nothing interferes
// with DRAM initialisation, then program rank population for training.
static void prog_decode_before_jedec(
  MRCParams_t *mrc_params)
{
  RegDRP Drp;
  RegDRCF Drfc;
  RegDCAL Dcal;
  RegDSCH Dsch;
  RegDPMC0 Dpmc0;

  ENTERFN();

  // Disable power saving features (clock gating, power down,
  // dynamic self refresh, page close timer)
  Dpmc0.raw = isbR32m(MCU, DPMC0);
  Dpmc0.field.CLKGTDIS = 1;
  Dpmc0.field.DISPWRDN = 1;
  Dpmc0.field.DYNSREN = 0;
  Dpmc0.field.PCLSTO = 0;
  isbW32m(MCU, DPMC0, Dpmc0.raw);

  // Disable out of order transactions
  Dsch.raw = isbR32m(MCU, DSCH);
  Dsch.field.OOODIS = 1;
  Dsch.field.NEWBYPDIS = 1;
  isbW32m(MCU, DSCH, Dsch.raw);

  // Disable issuing the REF command (tREFI = 0 means no auto refresh)
  Drfc.raw = isbR32m(MCU, DRFC);
  Drfc.field.tREFI = 0;
  isbW32m(MCU, DRFC, Drfc.raw);

  // Disable ZQ calibration short
  Dcal.raw = isbR32m(MCU, DCAL);
  Dcal.field.ZQCINT = 0;
  Dcal.field.SRXZQCL = 0;
  isbW32m(MCU, DCAL, Dcal.raw);

  // Training performed in address mode 0, rank population has limited impact, however
  // simulator complains if enabled non-existing rank.
  // rank_enables is a bitmask: bit0 = rank 0, bit1 = rank 1.
  Drp.raw = 0;
  if (mrc_params->rank_enables & 1)
    Drp.field.rank0Enabled = 1;
  if (mrc_params->rank_enables & 2)
    Drp.field.rank1Enabled = 1;
  isbW32m(MCU, DRP, Drp.raw);

  LEAVEFN();
}
+\r
// After Cold Reset, BIOS should set COLDWAKE bit to 1 before
// sending the WAKE message to the Dunit.
// For Standby Exit, or any other mode in which the DRAM is in
// SR, this bit must be set to 0.
static void perform_ddr_reset(
  MRCParams_t *mrc_params)
{
  ENTERFN();

  // Set COLDWAKE bit (DRMC BIT16) before sending the WAKE message
  isbM32m(MCU, DRMC, BIT16, BIT16);

  // Send wake command to DUNIT (MUST be done before JEDEC)
  dram_wake_command();

  // Restore DRMC to its default (clears COLDWAKE; applies the
  // rd_odt_value==0 ODT override — see DRMC_DEFAULT)
  isbW32m(MCU, DRMC, DRMC_DEFAULT);

  LEAVEFN();
}
+\r
+// Dunit Initialisation Complete.\r
+// Indicates that initialisation of the Dunit has completed.\r
+// Memory accesses are permitted and maintenance operation\r
+// begins. Until this bit is set to a 1, the memory controller will\r
+// not accept DRAM requests from the MEMORY_MANAGER or HTE.\r
+static void set_ddr_init_complete(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ RegDCO Dco;\r
+\r
+ ENTERFN();\r
+\r
+ Dco.raw = isbR32m(MCU, DCO);\r
+ Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
+ Dco.field.IC = 1; //1 - initialisation complete\r
+ isbW32m(MCU, DCO, Dco.raw);\r
+\r
+ LEAVEFN();\r
+}\r
+\r
+static void prog_page_ctrl(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ RegDPMC0 Dpmc0;\r
+\r
+ ENTERFN();\r
+\r
+ Dpmc0.raw = isbR32m(MCU, DPMC0);\r
+\r
+ Dpmc0.field.PCLSTO = 0x4;\r
+ Dpmc0.field.PREAPWDEN = 1;\r
+\r
+ isbW32m(MCU, DPMC0, Dpmc0.raw);\r
+}\r
+\r
// Configure MCU Power Management Control Register (DPMC0/DPMC1)
// and Scheduler Control Register (DSCH) for normal operation:
// re-enables the out-of-order scheduling and clock gating that
// prog_decode_before_jedec() disabled for training.
static void prog_ddr_control(
  MRCParams_t *mrc_params)
{
  RegDSCH Dsch;
  RegDPMC0 Dpmc0;

  ENTERFN();

  Dpmc0.raw = isbR32m(MCU, DPMC0);
  Dsch.raw = isbR32m(MCU, DSCH);

  // Power down is policy-controlled by the caller; clock gating back on,
  // page close timer 4, precharge-all power down enabled.
  Dpmc0.field.DISPWRDN = mrc_params->power_down_disable;
  Dpmc0.field.CLKGTDIS = 0;
  Dpmc0.field.PCLSTO = 4;
  Dpmc0.field.PREAPWDEN = 1;

  // Re-enable out-of-order transactions and bypass
  Dsch.field.OOODIS = 0;
  Dsch.field.OOOST3DIS = 0;
  Dsch.field.NEWBYPDIS = 0;

  isbW32m(MCU, DSCH, Dsch.raw);
  isbW32m(MCU, DPMC0, Dpmc0.raw);

  // CMDTRIST = 2h - CMD/ADDR are tristated when no valid command
  isbM32m(MCU, DPMC1, 2 << 4, BIT5|BIT4);

  LEAVEFN();
}
+\r
// After training complete configure MCU Rank Population Register
// specifying: ranks enabled, device width, density, address mode.
// IC is dropped while DRP is reprogrammed and re-asserted afterwards.
static void prog_dra_drb(
  MRCParams_t *mrc_params)
{
  RegDRP Drp;
  RegDCO Dco;

  ENTERFN();

  // Clear Initialisation Complete while changing rank population
  Dco.raw = isbR32m(MCU, DCO);
  Dco.field.IC = 0;
  isbW32m(MCU, DCO, Dco.raw);

  // rank_enables is a bitmask: bit0 = rank 0, bit1 = rank 1
  Drp.raw = 0;
  if (mrc_params->rank_enables & 1)
    Drp.field.rank0Enabled = 1;
  if (mrc_params->rank_enables & 2)
    Drp.field.rank1Enabled = 1;
  if (mrc_params->dram_width == x16)
  {
    Drp.field.dimm0DevWidth = 1;
    Drp.field.dimm1DevWidth = 1;
  }
  // Density encoding in DRAMParams_t 0=512Mb, 1=Gb, 2=2Gb, 3=4Gb
  // has to be mapped RANKDENSx encoding (0=1Gb)
  Drp.field.dimm0DevDensity = mrc_params->params.DENSITY - 1;
  Drp.field.dimm1DevDensity = mrc_params->params.DENSITY - 1;

  // Address mode can be overwritten if ECC enabled
  Drp.field.addressMap = mrc_params->address_mode;

  isbW32m(MCU, DRP, Drp.raw);

  // Re-assert IC and hand PRI back to the MEMORY_MANAGER
  Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER
  Dco.field.IC = 1;     //1 - initialisation complete
  isbW32m(MCU, DCO, Dco.raw);

  LEAVEFN();
}
+\r
// Configure refresh rate and short ZQ calibration interval.
// Activate dynamic self refresh and PHY clock gating.
static void change_refresh_period(
  MRCParams_t *mrc_params)
{
  RegDRCF Drfc;
  RegDCAL Dcal;
  RegDPMC0 Dpmc0;

  ENTERFN();

  // Program caller-selected refresh rate and clear the refresh debt counter
  Drfc.raw = isbR32m(MCU, DRFC);
  Drfc.field.tREFI = mrc_params->refresh_rate;
  Drfc.field.REFDBTCLR = 1;
  isbW32m(MCU, DRFC, Drfc.raw);

  // Short ZQ calibration interval
  Dcal.raw = isbR32m(MCU, DCAL);
  Dcal.field.ZQCINT = 3; // 63ms
  isbW32m(MCU, DCAL, Dcal.raw);

  // Enable PHY clock gating and dynamic self refresh
  Dpmc0.raw = isbR32m(MCU, DPMC0);
  Dpmc0.field.ENPHYCLKGATE = 1;
  Dpmc0.field.DYNSREN = 1;
  isbW32m(MCU, DPMC0, Dpmc0.raw);

  LEAVEFN();
}
+\r
// Send DRAM wake command.
// Thin traced wrapper around dram_wake_command(); unlike
// perform_ddr_reset() it does not touch the DRMC COLDWAKE bit.
static void perform_wake(
  MRCParams_t *mrc_params)
{
  ENTERFN();

  dram_wake_command();

  LEAVEFN();
}
+\r
// prog_ddr_timing_control (aka mcu_init):
// POST_CODE[major] == 0x02
//
// It will initialise timing registers in the MCU (DTR0..DTR4).
// Timings are derived from the DRAM parameters in mrc_params and the
// tCK/tRFC tables above, converted from picoseconds to DRAM clocks with
// MCEIL, then biased into each register field's encoding.
static void prog_ddr_timing_control(
  MRCParams_t *mrc_params)
{
  uint8_t TCL, WL;
  uint8_t TRP, TRCD, TRAS, TRFC, TWR, TWTR, TRRD, TRTP, TFAW;
  uint32_t TCK;

  RegDTR0 Dtr0;
  RegDTR1 Dtr1;
  RegDTR2 Dtr2;
  RegDTR3 Dtr3;
  RegDTR4 Dtr4;

  ENTERFN();

  // mcu_init starts
  post_code(0x02, 0x00);

  Dtr0.raw = isbR32m(MCU, DTR0);
  Dtr1.raw = isbR32m(MCU, DTR1);
  Dtr2.raw = isbR32m(MCU, DTR2);
  Dtr3.raw = isbR32m(MCU, DTR3);
  Dtr4.raw = isbR32m(MCU, DTR4);

  TCK = tCK[mrc_params->ddr_speed];  // Clock in picoseconds
  TCL = mrc_params->params.tCL;      // CAS latency in clocks
  TRP = TCL;  // Per CAT MRC
  TRCD = TCL; // Per CAT MRC
  TRAS = MCEIL(mrc_params->params.tRAS, TCK);
  TRFC = MCEIL(tRFC[mrc_params->params.DENSITY], TCK);
  TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600

  TWTR = MCEIL(mrc_params->params.tWTR, TCK);
  TRRD = MCEIL(mrc_params->params.tRRD, TCK);
  TRTP = 4; // Valid for 800 and 1066, use 5 for 1333
  TFAW = MCEIL(mrc_params->params.tFAW, TCK);

  // Write latency: 5 clocks at 800, +1 per speed grade
  WL = 5 + mrc_params->ddr_speed;

  Dtr0.field.dramFrequency = mrc_params->ddr_speed;

  Dtr0.field.tCL = TCL - 5;   //Convert from TCL (DRAM clocks) to VLV indx
  Dtr0.field.tRP = TRP - 5;   //5 bit DRAM Clock
  Dtr0.field.tRCD = TRCD - 5; //5 bit DRAM Clock

  Dtr1.field.tWCL = WL - 3;            //Convert from WL (DRAM clocks) to VLV indx
  Dtr1.field.tWTP = WL + 4 + TWR - 14; //Change to tWTP
  Dtr1.field.tRTP = MMAX(TRTP, 4) - 3; //4 bit DRAM Clock
  Dtr1.field.tRRD = TRRD - 4;          //4 bit DRAM Clock
  Dtr1.field.tCMD = 1;                 //2N
  Dtr1.field.tRAS = TRAS - 14;         //6 bit DRAM Clock

  Dtr1.field.tFAW = ((TFAW + 1) >> 1) - 5; //4 bit DRAM Clock
  Dtr1.field.tCCD = 0; //Set 4 Clock CAS to CAS delay (multi-burst)
  Dtr2.field.tRRDR = 1;
  Dtr2.field.tWWDR = 2;
  Dtr2.field.tRWDR = 2;
  Dtr3.field.tWRDR = 2;
  Dtr3.field.tWRDD = 2;

  // NOTE(review): both branches program the same value, and no branch
  // covers 1333 (tRWSR keeps its read-back value there) — confirm this is
  // intentional rather than a copy/paste remnant.
  if (mrc_params->ddr_speed == DDRFREQ_800)
  {
    // Extended RW delay (+1)
    Dtr3.field.tRWSR = TCL - 5 + 1;
  }
  else if(mrc_params->ddr_speed == DDRFREQ_1066)
  {
    // Extended RW delay (+1)
    Dtr3.field.tRWSR = TCL - 5 + 1;
  }

  Dtr3.field.tWRSR = 4 + WL + TWTR - 11;

  if (mrc_params->ddr_speed == DDRFREQ_800)
  {
    Dtr3.field.tXP = MMAX(0, 1 - Dtr1.field.tCMD);
  }
  else
  {
    Dtr3.field.tXP = MMAX(0, 2 - Dtr1.field.tCMD);
  }

  // ODT windows are anchored off the command delay and read/write latencies
  Dtr4.field.WRODTSTRT = Dtr1.field.tCMD;
  Dtr4.field.WRODTSTOP = Dtr1.field.tCMD;
  Dtr4.field.RDODTSTRT = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2; //Convert from WL (DRAM clocks) to VLV indx
  Dtr4.field.RDODTSTOP = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2;
  Dtr4.field.TRGSTRDIS = 0;
  Dtr4.field.ODTDIS = 0;

  isbW32m(MCU, DTR0, Dtr0.raw);
  isbW32m(MCU, DTR1, Dtr1.raw);
  isbW32m(MCU, DTR2, Dtr2.raw);
  isbW32m(MCU, DTR3, Dtr3.raw);
  isbW32m(MCU, DTR4, Dtr4.raw);

  LEAVEFN();
}
+\r
+// ddrphy_init:\r
+// POST_CODE[major] == 0x03\r
+//\r
+// This function performs some initialisation on the DDRIO unit.\r
+// This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.\r
+static void ddrphy_init(MRCParams_t *mrc_params)\r
+{\r
+ uint32_t tempD; // temporary DWORD\r
+ uint8_t channel_i; // channel counter\r
+ uint8_t rank_i; // rank counter\r
+ uint8_t bl_grp_i; // byte lane group counter (2 BLs per module)\r
+\r
+ uint8_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1; // byte lane divisor\r
+ uint8_t speed = mrc_params->ddr_speed & (BIT1|BIT0); // For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333\r
+ uint8_t tCAS;\r
+ uint8_t tCWL;\r
+\r
+ ENTERFN();\r
+\r
+ tCAS = mrc_params->params.tCL;\r
+ tCWL = 5 + mrc_params->ddr_speed;\r
+\r
+ // ddrphy_init starts\r
+ post_code(0x03, 0x00);\r
+\r
+ // HSD#231531\r
+ // Make sure IOBUFACT is deasserted before initialising the DDR PHY.\r
+ // HSD#234845\r
+ // Make sure WRPTRENABLE is deasserted before initialising the DDR PHY.\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+ // Deassert DDRPHY Initialisation Complete\r
+ isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT20, BIT20); // SPID_INIT_COMPLETE=0\r
+ // Deassert IOBUFACT\r
+ isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT2, BIT2); // IOBUFACTRST_N=0\r
+ // Disable WRPTR\r
+ isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT0, BIT0); // WRPTRENABLE=0\r
+ } // if channel enabled\r
+ } // channel_i loop\r
+\r
+ // Put PHY in reset\r
+ isbM32m(DDRPHY, MASTERRSTN, 0, BIT0); // PHYRSTN=0\r
+\r
+ // Initialise DQ01,DQ23,CMD,CLK-CTL,COMP modules\r
+ // STEP0:\r
+ post_code(0x03, 0x10);\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+\r
+ // DQ01-DQ23\r
+ for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
+ isbM32m(DDRPHY, (DQOBSCKEBBCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i) ? (0x00) : (BIT22)), (BIT22)); // Analog MUX select - IO2xCLKSEL\r
+\r
+ // ODT Strength\r
+ switch (mrc_params->rd_odt_value) {\r
+ case 1: tempD = 0x3; break; // 60 ohm\r
+ case 2: tempD = 0x3; break; // 120 ohm\r
+ case 3: tempD = 0x3; break; // 180 ohm\r
+ default: tempD = 0x3; break; // 120 ohm\r
+ }\r
+ isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
+ isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
+ // Dynamic ODT/DIFFAMP\r
+ tempD = (((tCAS)<<24)|((tCAS)<<16)|((tCAS)<<8)|((tCAS)<<0));\r
+ switch (speed) {\r
+ case 0: tempD -= 0x01010101; break; // 800\r
+ case 1: tempD -= 0x02020202; break; // 1066\r
+ case 2: tempD -= 0x03030303; break; // 1333\r
+ case 3: tempD -= 0x04040404; break; // 1600\r
+ }\r
+ isbM32m(DDRPHY, (B01LATCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // Launch Time: ODT, DIFFAMP, ODT, DIFFAMP\r
+ switch (speed) {\r
+ // HSD#234715\r
+ case 0: tempD = ((0x06<<16)|(0x07<<8)); break; // 800\r
+ case 1: tempD = ((0x07<<16)|(0x08<<8)); break; // 1066\r
+ case 2: tempD = ((0x09<<16)|(0x0A<<8)); break; // 1333\r
+ case 3: tempD = ((0x0A<<16)|(0x0B<<8)); break; // 1600\r
+ }\r
+ isbM32m(DDRPHY, (B0ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
+ isbM32m(DDRPHY, (B1ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
+\r
+ switch (mrc_params->rd_odt_value) {\r
+ case 0: tempD = ((0x3F<<16)|(0x3f<<10)); break; // override DIFFAMP=on, ODT=off\r
+ default: tempD = ((0x3F<<16)|(0x2A<<10)); break; // override DIFFAMP=on, ODT=on\r
+ }\r
+ isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
+ isbM32m(DDRPHY, (B1OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
+\r
+ // DLL Setup\r
+ // 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO)\r
+ isbM32m(DDRPHY, (B0LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
+ isbM32m(DDRPHY, (B1LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
+\r
+ // RCVEN Bypass (PO)\r
+ isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
+ isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
+ // TX\r
+ isbM32m(DDRPHY, (DQCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT16), (BIT16)); // 0 means driving DQ during DQS-preamble\r
+ isbM32m(DDRPHY, (B01PTRCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT8), (BIT8)); // WR_LVL mode disable\r
+ // RX (PO)\r
+ isbM32m(DDRPHY, (B0VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
+ isbM32m(DDRPHY, (B1VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
+ isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
+ isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
+ }\r
+ // CLKEBB\r
+ isbM32m(DDRPHY, (CMDOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT23));\r
+\r
+ // Enable tristate control of cmd/address bus\r
+ isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT1|BIT0));\r
+\r
+ // ODT RCOMP\r
+ isbM32m(DDRPHY, (CMDRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<5)|(0x03<<0)), ((BIT9|BIT8|BIT7|BIT6|BIT5)|(BIT4|BIT3|BIT2|BIT1|BIT0)));\r
+\r
+ // CMDPM* registers must be programmed in this order...\r
+ isbM32m(DDRPHY, (CMDPMDLYREG4 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFFFU<<16)|(0xFFFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: SFR (regulator), MPLL\r
+ isbM32m(DDRPHY, (CMDPMDLYREG3 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFU<<28)|(0xFFF<<16)|(0xF<<12)|(0x616<<0)), ((BIT31|BIT30|BIT29|BIT28)|(BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3, VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT_for_PM_MSG_gt0, MDLL Turn On\r
+ isbM32m(DDRPHY, (CMDPMDLYREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // MPLL Divider Reset Delays\r
+ isbM32m(DDRPHY, (CMDPMDLYREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn Off Delays: VREG, Staggered MDLL, MDLL, PI\r
+ isbM32m(DDRPHY, (CMDPMDLYREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT\r
+ isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x6<<8)|BIT6|(0x4<<0)), (BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|(BIT11|BIT10|BIT9|BIT8)|BIT6|(BIT3|BIT2|BIT1|BIT0))); // Allow PUnit signals\r
+ isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
+ // CLK-CTL\r
+ isbM32m(DDRPHY, (CCOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT24)); // CLKEBB\r
+ isbM32m(DDRPHY, (CCCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x0<<16)|(0x0<<12)|(0x0<<8)|(0xF<<4)|BIT0), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|BIT0)); // Buffer Enable: CS,CKE,ODT,CLK\r
+ isbM32m(DDRPHY, (CCRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT RCOMP\r
+ isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
+\r
+ // COMP (RON channel specific)\r
+ // - DQ/DQS/DM RON: 32 Ohm\r
+ // - CTRL/CMD RON: 27 Ohm\r
+ // - CLK RON: 26 Ohm\r
+ isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
+ isbM32m(DDRPHY, (CMDVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
+ isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0F<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
+ isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
+ isbM32m(DDRPHY, (CTLVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
+\r
+ // DQS Swapped Input Enable\r
+ isbM32m(DDRPHY, (COMPEN1CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT19|BIT17), ((BIT31|BIT30)|BIT19|BIT17|(BIT15|BIT14)));\r
+\r
+ // ODT VREF = 1.5 x 274/360+274 = 0.65V (code of ~50)\r
+ isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
+ isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
+ isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0E<<8)|(0x05<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
+\r
+ // Slew rate settings are frequency specific, numbers below are for 800Mhz (speed == 0)\r
+ // - DQ/DQS/DM/CLK SR: 4V/ns,\r
+ // - CTRL/CMD SR: 1.5V/ns\r
+ tempD = (0x0E<<16)|(0x0E<<12)|(0x08<<8)|(0x0B<<4)|(0x0B<<0);\r
+ isbM32m(DDRPHY, (DLYSELCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (tempD), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ\r
+ isbM32m(DDRPHY, (TCOVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x05<<16)|(0x05<<8)|(0x05<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // TCO Vref CLK,DQS,DQ\r
+ isbM32m(DDRPHY, (CCBUFODTCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODTCOMP CMD/CTL PU/PD\r
+ isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (0), ((BIT31|BIT30)|BIT8)); // COMP\r
+\r
+ #ifdef BACKUP_COMPS\r
+ // DQ COMP Overrides\r
+ isbM32m(DDRPHY, (DQDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
+ isbM32m(DDRPHY, (DQDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
+ isbM32m(DDRPHY, (DQDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
+ isbM32m(DDRPHY, (DQDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
+ isbM32m(DDRPHY, (DQODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
+ isbM32m(DDRPHY, (DQODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
+ isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
+ // DQS COMP Overrides\r
+ isbM32m(DDRPHY, (DQSDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
+ isbM32m(DDRPHY, (DQSDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
+ isbM32m(DDRPHY, (DQSDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
+ isbM32m(DDRPHY, (DQSDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
+ isbM32m(DDRPHY, (DQSODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
+ isbM32m(DDRPHY, (DQSODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
+ isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
+ // CLK COMP Overrides\r
+ isbM32m(DDRPHY, (CLKDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
+ isbM32m(DDRPHY, (CLKDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
+ isbM32m(DDRPHY, (CLKDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
+ isbM32m(DDRPHY, (CLKDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
+ isbM32m(DDRPHY, (CLKODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
+ isbM32m(DDRPHY, (CLKODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
+ isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
+ // CMD COMP Overrides\r
+ isbM32m(DDRPHY, (CMDDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
+ isbM32m(DDRPHY, (CMDDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
+ isbM32m(DDRPHY, (CMDDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
+ isbM32m(DDRPHY, (CMDDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
+ // CTL COMP Overrides\r
+ isbM32m(DDRPHY, (CTLDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
+ isbM32m(DDRPHY, (CTLDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
+ isbM32m(DDRPHY, (CTLDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
+ isbM32m(DDRPHY, (CTLDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
+ #else\r
+ // DQ TCOCOMP Overrides\r
+ isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
+ // DQS TCOCOMP Overrides\r
+ isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
+ // CLK TCOCOMP Overrides\r
+ isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
+ isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
+ #endif // BACKUP_COMPS\r
+ // program STATIC delays\r
+ #ifdef BACKUP_WCMD\r
+ set_wcmd(channel_i, ddr_wcmd[PLATFORM_ID]);\r
+ #else\r
+ set_wcmd(channel_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
+ #endif // BACKUP_WCMD\r
+ for (rank_i=0; rank_i<NUM_RANKS; rank_i++) {\r
+ if (mrc_params->rank_enables & (1<<rank_i)) {\r
+ set_wclk(channel_i, rank_i, ddr_wclk[PLATFORM_ID]);\r
+ #ifdef BACKUP_WCTL\r
+ set_wctl(channel_i, rank_i, ddr_wctl[PLATFORM_ID]);\r
+ #else\r
+ set_wctl(channel_i, rank_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
+ #endif // BACKUP_WCTL\r
+ }\r
+ }\r
+ }\r
+ }\r
+ // COMP (non channel specific)\r
+ //isbM32m(DDRPHY, (), (), ());\r
+ isbM32m(DDRPHY, (DQANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CMDANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CMDANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CLKANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CLKANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQSANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQSANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CTLANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CTLANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
+ isbM32m(DDRPHY, (CLKANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
+ isbM32m(DDRPHY, (CLKANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQSANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQSANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CMDANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CMDANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CLKANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CLKANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQSANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQSANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (CTLANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
+ isbM32m(DDRPHY, (CTLANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
+ isbM32m(DDRPHY, (CLKANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
+ isbM32m(DDRPHY, (CLKANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
+ isbM32m(DDRPHY, (DQSANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
+ isbM32m(DDRPHY, (DQSANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
+ isbM32m(DDRPHY, (TCOCNTCTRL), (0x1<<0), (BIT1|BIT0)); // TCOCOMP: Pulse Count\r
+ isbM32m(DDRPHY, (CHNLBUFSTATIC), ((0x03<<24)|(0x03<<16)), ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODT: CMD/CTL PD/PU\r
+ isbM32m(DDRPHY, (MSCNTR), (0x64<<0), (BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0)); // Set 1us counter\r
+ isbM32m(DDRPHY, (LATCH1CTL), (0x1<<28), (BIT30|BIT29|BIT28)); // ???\r
+\r
+ // Release PHY from reset\r
+ isbM32m(DDRPHY, MASTERRSTN, BIT0, BIT0); // PHYRSTN=1\r
+\r
+ // STEP1:\r
+ post_code(0x03, 0x11);\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+ // DQ01-DQ23\r
+ for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
+ isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
+ delay_n(3);\r
+ }\r
+ // ECC\r
+ isbM32m(DDRPHY, (ECCMDLLCTL), (BIT13), (BIT13)); // Enable VREG\r
+ delay_n(3);\r
+ // CMD\r
+ isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
+ delay_n(3);\r
+ // CLK-CTL\r
+ isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
+ delay_n(3);\r
+ }\r
+ }\r
+\r
+ // STEP2:\r
+ post_code(0x03, 0x12);\r
+ delay_n(200);\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+ // DQ01-DQ23\r
+ for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
+ isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT17), (BIT17)); // Enable MCDLL\r
+ delay_n(50);\r
+ }\r
+ // ECC\r
+ isbM32m(DDRPHY, (ECCMDLLCTL), (BIT17), (BIT17)); // Enable MCDLL\r
+ delay_n(50);\r
+ // CMD\r
+ isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
+ delay_n(50);\r
+ // CLK-CTL\r
+ isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
+ delay_n(50);\r
+ }\r
+ }\r
+\r
+ // STEP3:\r
+ post_code(0x03, 0x13);\r
+ delay_n(100);\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+ // DQ01-DQ23\r
+ for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
+#ifdef FORCE_16BIT_DDRIO\r
+ tempD = ((bl_grp_i) && (mrc_params->channel_width == x16)) ? ((0x1<<12)|(0x1<<8)|(0xF<<4)|(0xF<<0)) : ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
+#else\r
+ tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
+#endif\r
+ isbM32m(DDRPHY, (DQDLLTXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
+ delay_n(3);\r
+ isbM32m(DDRPHY, (DQDLLRXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL\r
+ delay_n(3);\r
+ isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL Overrides BL0\r
+ }\r
+\r
+ // ECC\r
+ tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
+ isbM32m(DDRPHY, (ECCDLLTXCTL), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
+ delay_n(3);\r
+\r
+ // CMD (PO)\r
+ isbM32m(DDRPHY, (CMDDLLTXCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0)), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
+ delay_n(3);\r
+ }\r
+ }\r
+\r
+\r
+ // STEP4:\r
+ post_code(0x03, 0x14);\r
+ for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
+ if (mrc_params->channel_enables & (1<<channel_i)) {\r
+ // Host To Memory Clock Alignment (HMC) for 800/1066\r
+ for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
+ isbM32m(DDRPHY, (DQCLKALIGNREG2 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i)?(0x3):(0x1)), (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
+ }\r
+ isbM32m(DDRPHY, (ECCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
+ isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x0, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
+ isbM32m(DDRPHY, (CCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
+ isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), (0x2<<4), (BIT5|BIT4)); // CLK_ALIGN_MODE\r
+ isbM32m(DDRPHY, (CMDCLKALIGNREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x18<<16)|(0x10<<8)|(0x8<<2)|(0x1<<0)), ((BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|(BIT1|BIT0))); // NUM_SAMPLES, MAX_SAMPLES, MACRO_PI_STEP, MICRO_PI_STEP\r
+ isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x10<<16)|(0x4<<8)|(0x2<<4)), ((BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4))); // ???, TOTAL_NUM_MODULES, FIRST_U_PARTITION\r
+ #ifdef HMC_TEST\r
+ isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT24, BIT24); // START_CLK_ALIGN=1\r
+ while (isbR32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET))) & BIT24); // wait for START_CLK_ALIGN=0\r
+ #endif // HMC_TEST\r
+\r
+ // Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN
+ isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), BIT0, BIT0); // WRPTRENABLE=1\r
+\r
+\r
+#ifdef SIM\r
+ // comp is not working on simulator\r
+#else\r
+ // COMP initial\r
+ isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), BIT5, BIT5); // enable bypass for CLK buffer (PO)\r
+ isbM32m(DDRPHY, (CMPCTRL), (BIT0), (BIT0)); // Initial COMP Enable\r
+ while (isbR32m(DDRPHY, (CMPCTRL)) & BIT0); // wait for Initial COMP Enable = 0\r
+ isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), ~BIT5, BIT5); // disable bypass for CLK buffer (PO)\r
+#endif\r
+\r
+ // IOBUFACT\r
+ // STEP4a\r
+ isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT2, BIT2); // IOBUFACTRST_N=1\r
+\r
+ // DDRPHY initialisation complete\r
+ isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT20, BIT20); // SPID_INIT_COMPLETE=1\r
+ }\r
+ }\r
+\r
+ LEAVEFN();\r
+ return;\r
+}\r
+\r
+// jedec_init (aka PerformJedecInit):
+// This function performs JEDEC initialisation on all enabled channels.
+//
+// Sequence: pulse DDR3 RESET#, wake the DRAMs (CKE + per-rank NOP), then
+// program the mode registers MR2, MR3, MR1, MR0 and issue ZQCL on every
+// populated rank (per the JEDEC DDR3 initialisation order).
+//
+// mrc_params: global MRC data; consumes rank_enables, ddr_speed, ron_value,
+//             rtt_nom_value and sr_temp_range, and produces mrc_params->mrs1
+//             (MRS1 payload reused later, e.g. by write leveling).
+// silent:     non-zero suppresses the POST code (used when JEDEC init is
+//             re-run from inside training).
+static void jedec_init(
+ MRCParams_t *mrc_params,
+ uint32_t silent)
+{
+ uint8_t TWR, WL, Rank;
+ uint32_t TCK;
+
+ RegDTR0 DTR0reg;
+
+ DramInitDDR3MRS0 mrs0Command;
+ DramInitDDR3EMR1 emrs1Command;
+ DramInitDDR3EMR2 emrs2Command;
+ DramInitDDR3EMR3 emrs3Command;
+
+ ENTERFN();
+
+ // jedec_init starts
+ if (!silent)
+ {
+ post_code(0x04, 0x00);
+ }
+
+ // Assert RESET# for 200us
+ isbM32m(DDRPHY, CCDDR3RESETCTL, BIT1, (BIT8|BIT1)); // DDR3_RESET_SET=0, DDR3_RESET_RESET=1
+#ifdef QUICKSIM
+ // Don't waste time during simulation
+ delay_u(2);
+#else
+ delay_u(200);
+#endif
+ isbM32m(DDRPHY, CCDDR3RESETCTL, BIT8, (BIT8|BIT1)); // DDR3_RESET_SET=1, DDR3_RESET_RESET=0
+
+ DTR0reg.raw = isbR32m(MCU, DTR0);
+
+ // Set CKEVAL for populated ranks
+ // then send NOP to each rank (#4550197)
+ {
+ uint32_t DRPbuffer;
+ uint32_t DRMCbuffer;
+
+ DRPbuffer = isbR32m(MCU, DRP);
+ DRPbuffer &= 0x3;
+ DRMCbuffer = isbR32m(MCU, DRMC);
+ DRMCbuffer &= 0xFFFFFFFC;
+ DRMCbuffer |= (BIT4 | DRPbuffer);
+
+ isbW32m(MCU, DRMC, DRMCbuffer);
+
+ for (Rank = 0; Rank < NUM_RANKS; Rank++)
+ {
+ // Skip to next populated rank
+ if ((mrc_params->rank_enables & (1 << Rank)) == 0)
+ {
+ continue;
+ }
+
+ dram_init_command(DCMD_NOP(Rank));
+ }
+
+ // restore DRMC (DRMC_DEFAULT also folds in the rd_odt_value override)
+ isbW32m(MCU, DRMC, DRMC_DEFAULT);
+ }
+
+ // setup for emrs 2
+ // BIT[15:11] --> Always "0"
+ // BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
+ // BIT[08] --> Always "0"
+ // BIT[07] --> SRT: use sr_temp_range
+ // BIT[06] --> ASR: want "Manual SR Reference" (0)
+ // BIT[05:03] --> CWL: use oem_tCWL
+ // BIT[02:00] --> PASR: want "Full Array" (0)
+ emrs2Command.raw = 0;
+ emrs2Command.field.bankAddress = 2;
+
+ // CWL is 5 clocks plus one per speed grade (assumes ddr_speed==0 is
+ // DDR3-800 -- TODO confirm); MR2 encodes CWL as (CWL - 5) per JEDEC DDR3.
+ WL = 5 + mrc_params->ddr_speed;
+ emrs2Command.field.CWL = WL - 5;
+ emrs2Command.field.SRT = mrc_params->sr_temp_range;
+
+ // setup for emrs 3
+ // BIT[15:03] --> Always "0"
+ // BIT[02] --> MPR: want "Normal Operation" (0)
+ // BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
+ emrs3Command.raw = 0;
+ emrs3Command.field.bankAddress = 3;
+
+ // setup for emrs 1
+ // BIT[15:13] --> Always "0"
+ // BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
+ // BIT[11:11] --> TDQS: want "Disabled" (0)
+ // BIT[10:10] --> Always "0"
+ // BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
+ // BIT[08] --> Always "0"
+ // BIT[07] --> WR_LVL: want "Disabled" (0)
+ // BIT[05,01] --> DIC: use ron_value
+ // BIT[04:03] --> AL: additive latency want "0" (0)
+ // BIT[00] --> DLL: want "Enable" (0)
+ //
+ // (BIT5|BIT1) set Ron value
+ // 00 --> RZQ/6 (40ohm)
+ // 01 --> RZQ/7 (34ohm)
+ // 1* --> RESERVED
+ //
+ // (BIT9|BIT6|BIT2) set Rtt_nom value
+ // 000 --> Disabled
+ // 001 --> RZQ/4 ( 60ohm)
+ // 010 --> RZQ/2 (120ohm)
+ // 011 --> RZQ/6 ( 40ohm)
+ // 1** --> RESERVED
+ emrs1Command.raw = 0;
+ emrs1Command.field.bankAddress = 1;
+ emrs1Command.field.dllEnabled = 0; // 0 = Enable , 1 = Disable
+
+ if (mrc_params->ron_value == 0)
+ {
+ emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_34;
+ }
+ else
+ {
+ emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_40;
+ }
+
+
+ if (mrc_params->rtt_nom_value == 0)
+ {
+ emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_40 << 6);
+ }
+ else if (mrc_params->rtt_nom_value == 1)
+ {
+ emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_60 << 6);
+ }
+ else if (mrc_params->rtt_nom_value == 2)
+ {
+ emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_120 << 6);
+ }
+ // NOTE(review): rtt_nom_value > 2 falls through and leaves Rtt_nom
+ // disabled -- confirm that is the intended behaviour for bad inputs.
+
+ // save MRS1 value (excluding control fields)
+ mrc_params->mrs1 = emrs1Command.raw >> 6;
+
+ // setup for mrs 0
+ // BIT[15:13] --> Always "0"
+ // BIT[12] --> PPD: for Quark (1)
+ // BIT[11:09] --> WR: use oem_tWR
+ // BIT[08] --> DLL: want "Reset" (1, self clearing)
+ // BIT[07] --> MODE: want "Normal" (0)
+ // BIT[06:04,02] --> CL: use oem_tCAS
+ // BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
+ // BIT[01:00] --> BL: want "8 Fixed" (0)
+ // WR:
+ // 0 --> 16
+ // 1 --> 5
+ // 2 --> 6
+ // 3 --> 7
+ // 4 --> 8
+ // 5 --> 10
+ // 6 --> 12
+ // 7 --> 14
+ // CL:
+ // BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
+ // BIT[06:04] use oem_tCAS-4
+ mrs0Command.raw = 0;
+ mrs0Command.field.bankAddress = 0;
+ mrs0Command.field.dllReset = 1;
+ mrs0Command.field.BL = 0;
+ mrs0Command.field.PPD = 1;
+ // DTR0.tCL presumably holds (tCAS - 5), so +1 yields the (tCAS - 4)
+ // encoding the CL table above requires -- TODO confirm DTR0 definition.
+ mrs0Command.field.casLatency = DTR0reg.field.tCL + 1;
+
+ TCK = tCK[mrc_params->ddr_speed];
+ TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600
+ // NOTE(review): (TWR - 4) matches the WR table above only for TWR in
+ // 5..8 (true for DDR3-800/1066); TWR >= 10 would need the non-linear
+ // encoding (10->5, 12->6, 14->7).
+ mrs0Command.field.writeRecovery = TWR - 4;
+
+ // Program mode registers in the order MR2, MR3, MR1, MR0 (with DLL
+ // reset), then ZQ calibration long -- the JEDEC DDR3 init order.
+ for (Rank = 0; Rank < NUM_RANKS; Rank++)
+ {
+ // Skip to next populated rank
+ if ((mrc_params->rank_enables & (1 << Rank)) == 0)
+ {
+ continue;
+ }
+
+ emrs2Command.field.rankSelect = Rank;
+ dram_init_command(emrs2Command.raw);
+
+ emrs3Command.field.rankSelect = Rank;
+ dram_init_command(emrs3Command.raw);
+
+ emrs1Command.field.rankSelect = Rank;
+ dram_init_command(emrs1Command.raw);
+
+ mrs0Command.field.rankSelect = Rank;
+ dram_init_command(mrs0Command.raw);
+
+ dram_init_command(DCMD_ZQCL(Rank));
+ }
+
+ LEAVEFN();
+ return;
+}
+\r
+// rcvn_cal:
+// POST_CODE[major] == 0x05
+//
+// This function will perform our RCVEN Calibration Algorithm.
+// We will only use the 2xCLK domain timings to perform RCVEN Calibration.
+// All byte lanes will be calibrated "simultaneously" per channel per rank.
+//
+// mrc_params: global MRC data; consumes channel_enables/rank_enables and
+//             channel_width, and programs the per-lane RCVEN PI delays via
+//             set_rcvn(). With R2R_SHARING the programmed value is a rolling
+//             average across all ranks trained so far on that channel.
+static void rcvn_cal(
+ MRCParams_t *mrc_params)
+{
+ uint8_t channel_i; // channel counter
+ uint8_t rank_i; // rank counter
+ uint8_t bl_i; // byte lane counter
+ uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor
+
+#ifdef R2R_SHARING
+ uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
+#ifndef BACKUP_RCVN
+ uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
+#endif // BACKUP_RCVN
+#endif // R2R_SHARING
+
+#ifdef BACKUP_RCVN
+#else
+ uint32_t tempD; // temporary DWORD
+ uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane
+ RegDTR1 dtr1;
+ RegDTR1 dtr1save;
+#endif // BACKUP_RCVN
+ ENTERFN();
+
+ // rcvn_cal starts
+ post_code(0x05, 0x00);
+
+#ifndef BACKUP_RCVN
+ // need separate burst to sample DQS preamble
+ dtr1.raw = dtr1save.raw = isbR32m(MCU, DTR1);
+ dtr1.field.tCCD = 1;
+ isbW32m(MCU, DTR1, dtr1.raw);
+#endif
+
+#ifdef R2R_SHARING
+ // need to set "final_delay[][]" elements to "0"
+ memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
+#endif // R2R_SHARING
+
+ // loop through each enabled channel
+ for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
+ {
+ if (mrc_params->channel_enables & (1 << channel_i))
+ {
+ // perform RCVEN Calibration on a per rank basis
+ for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
+ {
+ if (mrc_params->rank_enables & (1 << rank_i))
+ {
+ // POST_CODE here indicates the current channel and rank being calibrated
+ post_code(0x05, (0x10 + ((channel_i << 4) | rank_i)));
+
+#ifdef BACKUP_RCVN
+ // set hard-coded timing values
+ for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
+ {
+ set_rcvn(channel_i, rank_i, bl_i, ddr_rcvn[PLATFORM_ID]);
+ }
+#else
+ // enable FIFORST
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)
+ {
+ isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), 0,
+ BIT8); // 0 is enabled
+ } // bl_i loop
+ // initialise the starting delay to 128 PI (tCAS +1 CLK)
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+#ifdef SIM
+ // Original value was late at the end of DQS sequence
+ delay[bl_i] = 3 * FULL_CLK;
+#else
+ delay[bl_i] = (4 + 1) * FULL_CLK; // 1x CLK domain timing is tCAS-4
+#endif
+
+ set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
+ } // bl_i loop
+
+ // now find the rising edge
+ find_rising_edge(mrc_params, delay, channel_i, rank_i, true);
+ // Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse.
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ delay[bl_i] += QRTR_CLK;
+ set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
+ } // bl_i loop
+ // Now decrement delay by 128 PI (1 CLK) until we sample a "0"
+ do
+ {
+
+ tempD = sample_dqs(mrc_params, channel_i, rank_i, true);
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ if (tempD & (1 << bl_i))
+ {
+ if (delay[bl_i] >= FULL_CLK)
+ {
+ delay[bl_i] -= FULL_CLK;
+ set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
+ }
+ else
+ {
+ // not enough delay
+ // (0xEE major code reports a fatal training error; presumably
+ // post_code halts here -- TODO confirm, otherwise this do/while
+ // could spin on a lane that still samples "1")
+ training_message(channel_i, rank_i, bl_i);
+ post_code(0xEE, 0x50);
+ }
+ }
+ } // bl_i loop
+ } while (tempD & 0xFF);
+
+#ifdef R2R_SHARING
+ // increment "num_ranks_enabled"
+ num_ranks_enabled++;
+ // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ delay[bl_i] += QRTR_CLK;
+ // add "delay[]" values to "final_delay[][]" for rolling average
+ final_delay[channel_i][bl_i] += delay[bl_i];
+ // set timing based on rolling average values
+ set_rcvn(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
+ } // bl_i loop
+#else
+ // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
+ for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
+ {
+ delay[bl_i] += QRTR_CLK;
+ set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
+ } // bl_i loop
+
+#endif // R2R_SHARING
+
+ // disable FIFORST
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)
+ {
+ isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), BIT8,
+ BIT8); // 1 is disabled
+ } // bl_i loop
+
+#endif // BACKUP_RCVN
+
+ } // if rank is enabled
+ } // rank_i loop
+ } // if channel is enabled
+ } // channel_i loop
+
+#ifndef BACKUP_RCVN
+ // restore original
+ isbW32m(MCU, DTR1, dtr1save.raw);
+#endif
+
+#ifdef MRC_SV
+ // Interactive tuning of the RDCMD2DATAVALID / DIFFAMP dynamic timings,
+ // available only in the MRC standalone/validation build.
+ if (mrc_params->tune_rcvn)
+ {
+ uint32_t rcven, val;
+ uint32_t rdcmd2rcven;
+
+ /*
+ Formulas for RDCMD2DATAVALID & DIFFAMP dynamic timings
+
+ 1. Set after RCVEN training
+
+ //Tune RDCMD2DATAVALID
+
+ x80/x84[21:16]
+ MAX OF 2 RANKS : round up (rdcmd2rcven (rcven 1x) + 2x x 2 + PI/128) + 5
+
+ //rdcmd2rcven x80/84[12:8]
+ //rcven 2x x70[23:20] & [11:8]
+
+ //Tune DIFFAMP Timings
+
+ //diffampen launch x88[20:16] & [4:0] -- B01LATCTL1
+ MIN OF 2 RANKS : round down (rcven 1x + 2x x 2 + PI/128) - 1
+
+ //diffampen length x8C/x90 [13:8] -- B0ONDURCTL B1ONDURCTL
+ MAX OF 2 RANKS : roundup (rcven 1x + 2x x 2 + PI/128) + 5
+
+
+ 2. need to do a fiforst after settings these values
+ */
+
+ DPF(D_INFO, "BEFORE\n");
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));
+
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));
+
+ rcven = get_rcvn(0, 0, 0) / 128;
+ rdcmd2rcven = (isbR32m(DDRPHY, B0LATCTL0) >> 8) & 0x1F;
+ val = rdcmd2rcven + rcven + 6;
+ isbM32m(DDRPHY, B0LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));
+
+ val = rdcmd2rcven + rcven - 1;
+ isbM32m(DDRPHY, B01LATCTL1, val << 0, (BIT4|BIT3|BIT2|BIT1|BIT0));
+
+ val = rdcmd2rcven + rcven + 5;
+ isbM32m(DDRPHY, B0ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));
+
+ rcven = get_rcvn(0, 0, 1) / 128;
+ rdcmd2rcven = (isbR32m(DDRPHY, B1LATCTL0) >> 8) & 0x1F;
+ val = rdcmd2rcven + rcven + 6;
+ isbM32m(DDRPHY, B1LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));
+
+ val = rdcmd2rcven + rcven - 1;
+ isbM32m(DDRPHY, B01LATCTL1, val << 16, (BIT20|BIT19|BIT18|BIT17|BIT16));
+
+ val = rdcmd2rcven + rcven + 5;
+ isbM32m(DDRPHY, B1ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));
+
+ DPF(D_INFO, "AFTER\n");
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));
+
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));
+ DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));
+
+ DPF(D_INFO, "\nPress a key\n");
+ mgetc();
+
+ // fifo reset
+ isbM32m(DDRPHY, B01PTRCTL1, 0, BIT8); // 0 is enabled
+ delay_n(3);
+ isbM32m(DDRPHY, B01PTRCTL1, BIT8, BIT8); // 1 is disabled
+ }
+#endif
+
+ LEAVEFN();
+ return;
+}
+\r
+// Check memory executing write/read/verify of many data patterns
+// at the specified address. Bits in the result indicate failure
+// on specific byte lane.
+//
+// A pending mrc_params->hte_setup request is consumed here: when set, the
+// HTE is (re)selected before the run and the flag is cleared, and the HTE
+// is told this is a first run so it performs its setup pass.
+// Returns the HTE per-byte-lane failure bitmask (0 == all lanes passed).
+static uint32_t check_bls_ex(
+ MRCParams_t *mrc_params,
+ uint32_t address)
+{
+ uint32_t result;
+ uint8_t first_run = 0;
+
+ if (mrc_params->hte_setup)
+ {
+ mrc_params->hte_setup = 0;
+
+ first_run = 1;
+ select_hte(mrc_params);
+ }
+
+ result = WriteStressBitLanesHTE(mrc_params, address, first_run);
+
+ DPF(D_TRN, "check_bls_ex result is %x\n", result);
+ return result;
+}
+\r
+// Check memory executing simple write/read/verify at
+// the specified address. Bits in the result indicate failure
+// on specific byte lane.
+//
+// Same hte_setup handshake as check_bls_ex(): a pending setup request is
+// consumed, the HTE reselected, and first_run passed through. The check
+// itself is a basic write/read pass in WRITE_TRAIN mode (cheaper than the
+// multi-pattern stress of check_bls_ex()).
+// Returns the HTE per-byte-lane failure bitmask (0 == all lanes passed).
+static uint32_t check_rw_coarse(
+ MRCParams_t *mrc_params,
+ uint32_t address)
+{
+ uint32_t result = 0;
+ uint8_t first_run = 0;
+
+ if (mrc_params->hte_setup)
+ {
+ mrc_params->hte_setup = 0;
+
+ first_run = 1;
+ select_hte(mrc_params);
+ }
+
+ result = BasicWriteReadHTE(mrc_params, address, first_run, WRITE_TRAIN);
+
+ DPF(D_TRN, "check_rw_coarse result is %x\n", result);
+ return result;
+}
+\r
+// wr_level:
+// POST_CODE[major] == 0x06
+//
+// This function will perform the Write Levelling algorithm (align WCLK and WDQS).
+// This algorithm will act on each rank in each channel separately.
+//
+// mrc_params: global MRC data; consumes channel_enables/rank_enables,
+//             channel_width and the saved mrc_params->mrs1 (restored after
+//             leveling mode), and programs WDQS/WDQ PI delays via
+//             set_wdqs()/set_wdq(). With R2R_SHARING the final values are a
+//             rolling average across the ranks trained so far on a channel.
+static void wr_level(
+ MRCParams_t *mrc_params)
+{
+ uint8_t channel_i; // channel counter
+ uint8_t rank_i; // rank counter
+ uint8_t bl_i; // byte lane counter
+ uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor
+
+#ifdef R2R_SHARING
+ uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
+#ifndef BACKUP_WDQS
+ uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
+#endif // BACKUP_WDQS
+#endif // R2R_SHARING
+
+#ifdef BACKUP_WDQS
+#else
+ bool all_edges_found; // determines stop condition for CRS_WR_LVL
+ uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane
+ // NOTE(review): the comment that used to sit here described a removed
+ // 'static' table; delay[] is a plain automatic array, re-initialised below.
+
+ uint32_t address; // address to be checked during COARSE_WR_LVL
+ RegDTR4 dtr4;
+ RegDTR4 dtr4save;
+#endif // BACKUP_WDQS
+
+ ENTERFN();
+
+ // wr_level starts
+ post_code(0x06, 0x00);
+
+#ifdef R2R_SHARING
+ // need to set "final_delay[][]" elements to "0"
+ memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
+#endif // R2R_SHARING
+ // loop through each enabled channel
+ for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
+ {
+ if (mrc_params->channel_enables & (1 << channel_i))
+ {
+ // perform WRITE LEVELING algorithm on a per rank basis
+ for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
+ {
+ if (mrc_params->rank_enables & (1 << rank_i))
+ {
+ // POST_CODE here indicates the current rank and channel being calibrated
+ post_code(0x06, (0x10 + ((channel_i << 4) | rank_i)));
+
+#ifdef BACKUP_WDQS
+ for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
+ {
+ set_wdqs(channel_i, rank_i, bl_i, ddr_wdqs[PLATFORM_ID]);
+ set_wdq(channel_i, rank_i, bl_i, (ddr_wdqs[PLATFORM_ID] - QRTR_CLK));
+ }
+#else
+
+ { // Begin product specific code
+
+ // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state
+ dram_init_command(DCMD_PREA(rank_i));
+
+ // enable Write Levelling Mode (EMRS1 w/ Write Levelling Mode Enable)
+ // (0x0082: BIT7 = Write Leveling Enable; BIT1 presumably the DIC/Ron
+ // bit -- TODO confirm against the DCMD_MRS1 payload encoding)
+ dram_init_command(DCMD_MRS1(rank_i,0x0082));
+
+ // set ODT DRAM Full Time Termination disable in MCU
+ dtr4.raw = dtr4save.raw = isbR32m(MCU, DTR4);
+ dtr4.field.ODTDIS = 1;
+ isbW32m(MCU, DTR4, dtr4.raw);
+
+ for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)
+ {
+ isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),
+ (BIT28 | (0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),
+ (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Enable Sandy Bridge Mode (WDQ Tri-State) & Ensure 5 WDQS pulses during Write Leveling
+ }
+
+ isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (BIT16), (BIT16)); // Write Leveling Mode enabled in IO
+ } // End product specific code
+ // Initialise the starting delay to WCLK
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ { // Begin product specific code
+ // CLK0 --> RK0
+ // CLK1 --> RK1
+ delay[bl_i] = get_wclk(channel_i, rank_i);
+ } // End product specific code
+ set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);
+ } // bl_i loop
+ // now find the rising edge
+ find_rising_edge(mrc_params, delay, channel_i, rank_i, false);
+ { // Begin product specific code
+ // disable Write Levelling Mode
+ isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (0), (BIT16)); // Write Leveling Mode disabled in IO
+
+ for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)
+ {
+ isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),
+ ((0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),
+ (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation
+ } // bl_i loop
+
+ // restore original DTR4
+ isbW32m(MCU, DTR4, dtr4save.raw);
+
+ // restore original value (Write Levelling Mode Disable)
+ dram_init_command(DCMD_MRS1(rank_i, mrc_params->mrs1));
+
+ // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state
+ dram_init_command(DCMD_PREA(rank_i));
+ } // End product specific code
+
+ post_code(0x06, (0x30 + ((channel_i << 4) | rank_i)));
+
+ // COARSE WRITE LEVEL:
+ // check that we're on the correct clock edge
+
+ // hte reconfiguration request
+ mrc_params->hte_setup = 1;
+
+ // start CRS_WR_LVL with WDQS = WDQS + 128 PI
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ delay[bl_i] = get_wdqs(channel_i, rank_i, bl_i) + FULL_CLK;
+ set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);
+ // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
+ set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));
+ } // bl_i loop
+
+ // get an address in the targeted channel/rank
+ address = get_addr(mrc_params, channel_i, rank_i);
+ // Walk each failing lane back one full clock at a time until the
+ // coarse write/read check passes on every lane.
+ do
+ {
+ uint32_t coarse_result = 0x00;
+ uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
+ all_edges_found = true; // assume pass
+
+#ifdef SIM
+ // need restore memory to idle state as write can be in bad sync
+ dram_init_command (DCMD_PREA(rank_i));
+#endif
+
+ mrc_params->hte_setup = 1;
+ coarse_result = check_rw_coarse(mrc_params, address);
+
+ // check for failures and margin the byte lane back 128 PI (1 CLK)
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ if (coarse_result & (coarse_result_mask << bl_i))
+ {
+ all_edges_found = false;
+ delay[bl_i] -= FULL_CLK;
+ set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);
+ // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
+ set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));
+ }
+ } // bl_i loop
+
+ } while (!all_edges_found);
+
+#ifdef R2R_SHARING
+ // increment "num_ranks_enabled"
+ num_ranks_enabled++;
+ // accumulate "final_delay[][]" values from "delay[]" values for rolling average
+ for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
+ {
+ final_delay[channel_i][bl_i] += delay[bl_i];
+ set_wdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
+ // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)
+ set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled) - QRTR_CLK);
+ } // bl_i loop
+#endif // R2R_SHARING
+#endif // BACKUP_WDQS
+
+ } // if rank is enabled
+ } // rank_i loop
+ } // if channel is enabled
+ } // channel_i loop
+
+ LEAVEFN();
+ return;
+}
+\r
// rd_train:
// POST_CODE[major] == 0x07
//
// This function will perform the READ TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
// The idea here is to train the VREF and RDQS (and eventually RDQ) values to achieve maximum READ margins.
// The algorithm will first determine the X coordinate (RDQS setting).
// This is done by collapsing the VREF eye until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
// Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX, then average those; this will be the final X coordinate.
// The algorithm will then determine the Y coordinate (VREF setting).
// This is done by collapsing the RDQS eye until we find a minimum required VREF eye for RDQS_MIN and RDQS_MAX.
// Then we take the averages of the VREF eye at RDQS_MIN and RDQS_MAX, then average those; this will be the final Y coordinate.
// NOTE: this algorithm assumes the eye curves have a one-to-one relationship, meaning for each X the curve has only one Y and vice-a-versa.
static void rd_train(
    MRCParams_t *mrc_params)
{

#define MIN_RDQS_EYE 10 // in PI Codes
#define MIN_VREF_EYE 10 // in VREF Codes
#define RDQS_STEP 1 // how many RDQS codes to jump while margining
#define VREF_STEP 1 // how many VREF codes to jump while margining
#define VREF_MIN (0x00) // offset into "vref_codes[]" for minimum allowed VREF setting
#define VREF_MAX (0x3F) // offset into "vref_codes[]" for maximum allowed VREF setting
#define RDQS_MIN (0x00) // minimum RDQS delay value
#define RDQS_MAX (0x3F) // maximum RDQS delay value
#define B 0 // BOTTOM VREF
#define T 1 // TOP VREF
#define L 0 // LEFT RDQS
#define R 1 // RIGHT RDQS

  uint8_t channel_i; // channel counter
  uint8_t rank_i; // rank counter
  uint8_t bl_i; // byte lane counter
  uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor
#ifdef BACKUP_RDQS
#else
  uint8_t side_x; // tracks LEFT/RIGHT approach vectors
  uint8_t side_y; // tracks BOTTOM/TOP approach vectors
  // X coordinate data (passing RDQS values) for approach vectors
  uint8_t x_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
  // Y coordinate data (passing VREF values) for approach vectors.
  // NOTE: VREF has no rank dimension -- it is shared by all ranks of a channel/lane.
  uint8_t y_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_BYTE_LANES];
  uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // centered X (RDQS)
  uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES]; // centered Y (VREF)
  uint32_t address; // target address for "check_bls_ex()"
  uint32_t result; // result of "check_bls_ex()"
  uint32_t bl_mask; // byte lane mask for "result" checking
#ifdef R2R_SHARING
  uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
  uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
#endif // R2R_SHARING
#endif // BACKUP_RDQS
  // rd_train starts
  post_code(0x07, 0x00);

  ENTERFN();

#ifdef BACKUP_RDQS
  // No training: program the per-platform hard-coded RDQS values everywhere.
  for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1<<channel_i))
    {
      for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1<<rank_i))
        {
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            set_rdqs(channel_i, rank_i, bl_i, ddr_rdqs[PLATFORM_ID]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#else
  // initialise x/y_coordinate arrays to the extreme corners of the search space
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // x_coordinate:
            x_coordinate[L][B][channel_i][rank_i][bl_i] = RDQS_MIN;
            x_coordinate[R][B][channel_i][rank_i][bl_i] = RDQS_MAX;
            x_coordinate[L][T][channel_i][rank_i][bl_i] = RDQS_MIN;
            x_coordinate[R][T][channel_i][rank_i][bl_i] = RDQS_MAX;
            // y_coordinate:
            y_coordinate[L][B][channel_i][bl_i] = VREF_MIN;
            y_coordinate[R][B][channel_i][bl_i] = VREF_MIN;
            y_coordinate[L][T][channel_i][bl_i] = VREF_MAX;
            y_coordinate[R][T][channel_i][bl_i] = VREF_MAX;
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

  // initialise other variables
  bl_mask = byte_lane_mask(mrc_params);
  address = get_addr(mrc_params, 0, 0);

#ifdef R2R_SHARING
  // need to set "final_delay[][]" elements to "0"
  memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
#endif // R2R_SHARING

  // look for passing coordinates: walk all four approach vectors
  // (BOTTOM/TOP VREF x LEFT/RIGHT RDQS) and collapse each until it passes
  for (side_y = B; side_y <= T; side_y++)
  {
    for (side_x = L; side_x <= R; side_x++)
    {

      post_code(0x07, (0x10 + (side_y * 2) + (side_x)));

      // find passing values
      for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
      {
        if (mrc_params->channel_enables & (0x1 << channel_i))
        {
          for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
          {

            if (mrc_params->rank_enables & (0x1 << rank_i))
            {
              // set x/y_coordinate search starting settings
              for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
              {
                set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);
                set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);
              } // bl_i loop
              // get an address in the target channel/rank
              address = get_addr(mrc_params, channel_i, rank_i);

              // request HTE reconfiguration
              mrc_params->hte_setup = 1;

              // test the settings and keep collapsing the eye until all byte lanes pass
              do
              {

                // result[07:00] == failing byte lane (MAX 8)
                result = check_bls_ex( mrc_params, address);

                // check for failures
                if (result & 0xFF)
                {
                  // at least 1 byte lane failed
                  for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
                  {
                    if (result & (bl_mask << bl_i))
                    {
                      // adjust the RDQS values accordingly: move inward from the failing edge
                      if (side_x == L)
                      {
                        x_coordinate[L][side_y][channel_i][rank_i][bl_i] += RDQS_STEP;
                      }
                      else
                      {
                        x_coordinate[R][side_y][channel_i][rank_i][bl_i] -= RDQS_STEP;
                      }
                      // check that we haven't closed the RDQS_EYE too much
                      if ((x_coordinate[L][side_y][channel_i][rank_i][bl_i] > (RDQS_MAX - MIN_RDQS_EYE)) ||
                          (x_coordinate[R][side_y][channel_i][rank_i][bl_i] < (RDQS_MIN + MIN_RDQS_EYE))
                          ||
                          (x_coordinate[L][side_y][channel_i][rank_i][bl_i]
                              == x_coordinate[R][side_y][channel_i][rank_i][bl_i]))
                      {
                        // not enough RDQS margin available at this VREF
                        // update VREF values accordingly (move the Y search inward)
                        if (side_y == B)
                        {
                          y_coordinate[side_x][B][channel_i][bl_i] += VREF_STEP;
                        }
                        else
                        {
                          y_coordinate[side_x][T][channel_i][bl_i] -= VREF_STEP;
                        }
                        // check that we haven't closed the VREF_EYE too much
                        if ((y_coordinate[side_x][B][channel_i][bl_i] > (VREF_MAX - MIN_VREF_EYE)) ||
                            (y_coordinate[side_x][T][channel_i][bl_i] < (VREF_MIN + MIN_VREF_EYE)) ||
                            (y_coordinate[side_x][B][channel_i][bl_i] == y_coordinate[side_x][T][channel_i][bl_i]))
                        {
                          // VREF_EYE collapsed below MIN_VREF_EYE:
                          // fatal training failure -- post_code(0xEE,...) reports it
                          training_message(channel_i, rank_i, bl_i);
                          post_code(0xEE, (0x70 + (side_y * 2) + (side_x)));
                        }
                        else
                        {
                          // update the VREF setting
                          set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);
                          // reset the X coordinate to begin the search at the new VREF
                          x_coordinate[side_x][side_y][channel_i][rank_i][bl_i] =
                              (side_x == L) ? (RDQS_MIN) : (RDQS_MAX);
                        }
                      }
                      // update the RDQS setting
                      set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);
                    } // if bl_i failed
                  } // bl_i loop
                } // at least 1 byte lane failed
              } while (result & 0xFF);
            } // if rank is enabled
          } // rank_i loop
        } // if channel is enabled
      } // channel_i loop
    } // side_x loop
  } // side_y loop

  post_code(0x07, 0x20);

  // find final RDQS (X coordinate) & final VREF (Y coordinate)
  // by averaging the passing edges found by the four approach vectors
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            uint32_t tempD1;
            uint32_t tempD2;

            // x_coordinate:
            DPF(D_INFO, "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n", rank_i, bl_i,
                x_coordinate[L][T][channel_i][rank_i][bl_i],
                x_coordinate[R][T][channel_i][rank_i][bl_i],
                x_coordinate[L][B][channel_i][rank_i][bl_i],
                x_coordinate[R][B][channel_i][rank_i][bl_i]);

            tempD1 = (x_coordinate[R][T][channel_i][rank_i][bl_i] + x_coordinate[L][T][channel_i][rank_i][bl_i]) / 2; // average the TOP side LEFT & RIGHT values
            tempD2 = (x_coordinate[R][B][channel_i][rank_i][bl_i] + x_coordinate[L][B][channel_i][rank_i][bl_i]) / 2; // average the BOTTOM side LEFT & RIGHT values
            x_center[channel_i][rank_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages

            // y_coordinate:
            DPF(D_INFO, "VREF R/L eye lane%d : %d-%d %d-%d\n", bl_i,
                y_coordinate[R][B][channel_i][bl_i],
                y_coordinate[R][T][channel_i][bl_i],
                y_coordinate[L][B][channel_i][bl_i],
                y_coordinate[L][T][channel_i][bl_i]);

            tempD1 = (y_coordinate[R][T][channel_i][bl_i] + y_coordinate[R][B][channel_i][bl_i]) / 2; // average the RIGHT side TOP & BOTTOM values
            tempD2 = (y_coordinate[L][T][channel_i][bl_i] + y_coordinate[L][B][channel_i][bl_i]) / 2; // average the LEFT side TOP & BOTTOM values
            y_center[channel_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

#ifdef RX_EYE_CHECK
  // perform an eye check: verify MIN_RDQS_EYE/MIN_VREF_EYE margin around the centers.
  // NOTE(review): "address" here is whatever the last training iteration left behind
  // (last enabled channel/rank) -- confirm this is the intended check target.
  for (side_y=B; side_y<=T; side_y++)
  {
    for (side_x=L; side_x<=R; side_x++)
    {

      post_code(0x07, (0x30 + (side_y * 2) + (side_x)));

      // update the settings for the eye check
      for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
      {
        if (mrc_params->channel_enables & (1<<channel_i))
        {
          for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
          {
            if (mrc_params->rank_enables & (1<<rank_i))
            {
              for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
              {
                if (side_x == L)
                {
                  set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] - (MIN_RDQS_EYE / 2)));
                }
                else
                {
                  set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] + (MIN_RDQS_EYE / 2)));
                }
                if (side_y == B)
                {
                  set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] - (MIN_VREF_EYE / 2)));
                }
                else
                {
                  set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] + (MIN_VREF_EYE / 2)));
                }
              } // bl_i loop
            } // if rank is enabled
          } // rank_i loop
        } // if channel is enabled
      } // channel_i loop

      // request HTE reconfiguration
      mrc_params->hte_setup = 1;

      // check the eye
      if (check_bls_ex( mrc_params, address) & 0xFF)
      {
        // one or more byte lanes failed
        post_code(0xEE, (0x74 + (side_x * 2) + (side_y)));
      }
    } // side_x loop
  } // side_y loop
#endif // RX_EYE_CHECK

  post_code(0x07, 0x40);

  // set final placements
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
#ifdef R2R_SHARING
          // increment "num_ranks_enabled"
          num_ranks_enabled++;
#endif // R2R_SHARING
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // x_coordinate:
#ifdef R2R_SHARING
            // rank2rank sharing: program the rolling average across enabled ranks
            final_delay[channel_i][bl_i] += x_center[channel_i][rank_i][bl_i];
            set_rdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
#else
            set_rdqs(channel_i, rank_i, bl_i, x_center[channel_i][rank_i][bl_i]);
#endif // R2R_SHARING
            // y_coordinate:
            set_vref(channel_i, bl_i, y_center[channel_i][bl_i]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#endif // BACKUP_RDQS
  LEAVEFN();
  return;
}
+\r
// wr_train:
// POST_CODE[major] == 0x08
//
// This function will perform the WRITE TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
// The idea here is to train the WDQ timings to achieve maximum WRITE margins.
// The algorithm will start with WDQ at the current WDQ setting (tracks WDQS in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data patterns pass.
// This is because WDQS will be aligned to WCLK by the Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window of validity.
static void wr_train(
    MRCParams_t *mrc_params)
{

// NOTE: L/R are token-identical redefinitions of the rd_train() macros (benign per C11 6.10.3)
#define WDQ_STEP 1 // how many WDQ codes to jump while margining
#define L 0 // LEFT side loop value definition
#define R 1 // RIGHT side loop value definition

  uint8_t channel_i; // channel counter
  uint8_t rank_i; // rank counter
  uint8_t bl_i; // byte lane counter
  uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor
#ifdef BACKUP_WDQ
#else
  uint8_t side_i; // LEFT/RIGHT side indicator (0=L, 1=R)
  uint32_t tempD; // temporary DWORD
  uint32_t delay[2/*side_i*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // 2 arrays, for L & R side passing delays
  uint32_t address; // target address for "check_bls_ex()"
  uint32_t result; // result of "check_bls_ex()"
  uint32_t bl_mask; // byte lane mask for "result" checking
#ifdef R2R_SHARING
  uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
  uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
#endif // R2R_SHARING
#endif // BACKUP_WDQ

  // wr_train starts
  post_code(0x08, 0x00);

  ENTERFN();

#ifdef BACKUP_WDQ
  // No training: program the per-platform hard-coded WDQ values everywhere.
  for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1<<channel_i))
    {
      for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1<<rank_i))
        {
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            set_wdq(channel_i, rank_i, bl_i, ddr_wdq[PLATFORM_ID]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#else
  // initialise "delay"
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // want to start with WDQ = (WDQS - QRTR_CLK) +/- QRTR_CLK
            // NOTE(review): assumes WDQS >= 2*QRTR_CLK; otherwise the uint32_t
            // subtraction wraps -- confirm against wr_level() output range
            tempD = get_wdqs(channel_i, rank_i, bl_i) - QRTR_CLK;
            delay[L][channel_i][rank_i][bl_i] = tempD - QRTR_CLK;
            delay[R][channel_i][rank_i][bl_i] = tempD + QRTR_CLK;
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

  // initialise other variables
  bl_mask = byte_lane_mask(mrc_params);
  address = get_addr(mrc_params, 0, 0);

#ifdef R2R_SHARING
  // need to set "final_delay[][]" elements to "0"
  memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
#endif // R2R_SHARING

  // start algorithm on the LEFT side and train each channel/bl until no failures are observed, then repeat for the RIGHT side.
  for (side_i = L; side_i <= R; side_i++)
  {
    post_code(0x08, (0x10 + (side_i)));

    // set starting values
    for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
    {
      if (mrc_params->channel_enables & (1 << channel_i))
      {
        for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
        {
          if (mrc_params->rank_enables & (1 << rank_i))
          {
            for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
            {
              set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);
            } // bl_i loop
          } // if rank is enabled
        } // rank_i loop
      } // if channel is enabled
    } // channel_i loop

    // find passing values
    for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
    {
      if (mrc_params->channel_enables & (0x1 << channel_i))
      {
        for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
        {
          if (mrc_params->rank_enables & (0x1 << rank_i))
          {
            // get an address in the target channel/rank
            address = get_addr(mrc_params, channel_i, rank_i);

            // request HTE reconfiguration
            mrc_params->hte_setup = 1;

            // check the settings: step the failing edge inward until all lanes pass
            do
            {

#ifdef SIM
              // need restore memory to idle state as write can be in bad sync
              dram_init_command (DCMD_PREA(rank_i));
#endif

              // result[07:00] == failing byte lane (MAX 8)
              result = check_bls_ex( mrc_params, address);
              // check for failures
              if (result & 0xFF)
              {
                // at least 1 byte lane failed
                for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
                {
                  if (result & (bl_mask << bl_i))
                  {
                    if (side_i == L)
                    {
                      delay[L][channel_i][rank_i][bl_i] += WDQ_STEP;
                    }
                    else
                    {
                      delay[R][channel_i][rank_i][bl_i] -= WDQ_STEP;
                    }
                    // check for algorithm failure: LEFT edge meeting RIGHT edge means no eye
                    if (delay[L][channel_i][rank_i][bl_i] != delay[R][channel_i][rank_i][bl_i])
                    {
                      // margin available, update delay setting
                      set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);
                    }
                    else
                    {
                      // no margin available, notify the user and halt
                      training_message(channel_i, rank_i, bl_i);
                      post_code(0xEE, (0x80 + side_i));
                    }
                  } // if bl_i failed
                } // bl_i loop
              } // at least 1 byte lane failed
            } while (result & 0xFF); // stop when all byte lanes pass
          } // if rank is enabled
        } // rank_i loop
      } // if channel is enabled
    } // channel_i loop
  } // side_i loop

  // program WDQ to the middle of passing window
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
#ifdef R2R_SHARING
          // increment "num_ranks_enabled"
          num_ranks_enabled++;
#endif // R2R_SHARING
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {

            DPF(D_INFO, "WDQ eye rank%d lane%d : %d-%d\n", rank_i, bl_i,
                delay[L][channel_i][rank_i][bl_i],
                delay[R][channel_i][rank_i][bl_i]);

            // eye center = midpoint of LEFT and RIGHT passing edges
            tempD = (delay[R][channel_i][rank_i][bl_i] + delay[L][channel_i][rank_i][bl_i]) / 2;

#ifdef R2R_SHARING
            // rank2rank sharing: program the rolling average across enabled ranks
            final_delay[channel_i][bl_i] += tempD;
            set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
#else
            set_wdq(channel_i, rank_i, bl_i, tempD);
#endif // R2R_SHARING

          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#endif // BACKUP_WDQ
  LEAVEFN();
  return;
}
+\r
// Wrapper for jedec initialisation routine.
// Exists so jedec_init() fits the single-argument init_fn signature used by
// the MemInit step table; the second argument (0) selects the default behavior.
static void perform_jedec_init(
    MRCParams_t *mrc_params)
{
  jedec_init(mrc_params, 0);
}
+\r
// Configure DDRPHY for Auto-Refresh, Periodic Compensations,
// Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
static void set_auto_refresh(
    MRCParams_t *mrc_params)
{
  uint32_t channel_i; // channel counter
  uint32_t rank_i;    // rank counter
  uint32_t bl_i;      // byte lane counter
  // byte lane divisor deliberately forced to 1 here (x16 handling disabled)
  uint32_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1;
  uint32_t tempD;     // ODT override value for the DIFFAMP/ODT registers

  ENTERFN();

  // enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      // Enable Periodic RCOMPS
      isbM32m(DDRPHY, CMPCTRL, (BIT1), (BIT1));


      // Enable Dynamic DiffAmp & Set Read ODT Value
      switch (mrc_params->rd_odt_value)
      {
        case 0: tempD = 0x3F; break; // OFF
        default: tempD = 0x00; break; // Auto
      } // rd_odt_value switch

      // each iteration programs a byte-lane pair (B0/B1 override registers)
      for (bl_i=0; bl_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_i++)
      {
        isbM32m(DDRPHY, (B0OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),
            ((0x00<<16)|(tempD<<10)),
            ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT

        isbM32m(DDRPHY, (B1OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),
            ((0x00<<16)|(tempD<<10)),
            ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10)));// Override: DIFFAMP, ODT
      } // bl_i loop

      // Issue ZQCS command to every enabled rank
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          dram_init_command(DCMD_ZQCS(rank_i));
        } // if rank_i enabled
      } // rank_i loop

    } // if channel_i enabled
  } // channel_i loop

  clear_pointers();

  LEAVEFN();
  return;
}
+\r
// Depending on configuration enables ECC support.
// Available memory size is decreased, and memory is filled with 0s
// in order to clear error status. Address mode 2 forced.
static void ecc_enable(
    MRCParams_t *mrc_params)
{
  RegDRP Drp;       // DRAM parameters register
  RegDSCH Dsch;     // scheduler control register
  RegDECCCTRL Ctr;  // ECC control register

  // ECC not requested: nothing to do
  if (mrc_params->ecc_enables == 0) return;

  ENTERFN();

  // Configuration required in ECC mode
  Drp.raw = isbR32m(MCU, DRP);
  Drp.field.addressMap = 2;   // force address mode 2
  Drp.field.split64 = 1;
  isbW32m(MCU, DRP, Drp.raw);

  // Disable new request bypass
  Dsch.raw = isbR32m(MCU, DSCH);
  Dsch.field.NEWBYPDIS = 1;
  isbW32m(MCU, DSCH, Dsch.raw);

  // Enable ECC (single-bit, double-bit, check-bit generation)
  Ctr.raw = 0;
  Ctr.field.SBEEN = 1;
  Ctr.field.DBEEN = 1;
  Ctr.field.ENCBGEN = 1;
  isbW32m(MCU, DECCCTRL, Ctr.raw);

#ifdef SIM
  // Read back to be sure writing took place
  Ctr.raw = isbR32m(MCU, DECCCTRL);
#endif

  // Assume 8 bank memory, one bank is gone for ECC
  mrc_params->mem_size -= mrc_params->mem_size / 8;

  // For S3 resume memory content has to be preserved, so the
  // HTE zero-fill (which clears ECC error status) is skipped
  if (mrc_params->boot_mode != bmS3)
  {
    select_hte(mrc_params);
    HteMemInit(mrc_params, MrcMemInit, MrcHaltHteEngineOnError);
    select_memory_manager(mrc_params);
  }

  LEAVEFN();
  return;
}
+\r
// Lock MCU registers at the end of initialisation sequence.
// (mrc_params is unused; the parameter keeps the common init_fn
// callback signature used by the MemInit step table.)
static void lock_registers(
    MRCParams_t *mrc_params)
{
  RegDCO Dco;

  ENTERFN();

  // read-modify-write DCO: leave PRI enabled and owned by MEMORY_MANAGER,
  // then set the lock bits (also acts as the "init done" step, see MemInit table)
  Dco.raw = isbR32m(MCU, DCO);
  Dco.field.PMIDIS = 0; //0 - PRI enabled
  Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER
  Dco.field.DRPLOCK = 1;
  Dco.field.REUTLOCK = 1;
  isbW32m(MCU, DCO, Dco.raw);

  LEAVEFN();

}
+\r
+#ifdef MRC_SV\r
+\r
// cache write back invalidate:
// flushes dirty cache lines to memory, then invalidates the cache
// (x86 WBINVD instruction; syntax differs between GCC-style and MSVC-style asm)
static void asm_wbinvd(void)
{
#if defined (SIM) || defined (GCC)
  asm(
      "wbinvd;"
  );
#else
  __asm wbinvd;
#endif
}
+\r
// cache invalidate:
// discards cache contents WITHOUT writing dirty lines back
// (x86 INVD instruction; used so subsequent reads hit DRAM directly)
static void asm_invd(void)
{
#if defined (SIM) || defined (GCC)
  asm(
      "invd;"
  );
#else
  __asm invd;
#endif
}
+\r
+\r
+static void cpu_read(void)\r
+{\r
+ uint32_t adr, dat, limit;\r
+\r
+ asm_invd();\r
+\r
+ limit = 8 * 1024;\r
+ for (adr = 0; adr < limit; adr += 4)\r
+ {\r
+ dat = *(uint32_t*) adr;\r
+ if ((adr & 0x0F) == 0)\r
+ {\r
+ DPF(D_INFO, "\n%x : ", adr);\r
+ }\r
+ DPF(D_INFO, "%x ", dat);\r
+ }\r
+ DPF(D_INFO, "\n");\r
+\r
+ DPF(D_INFO, "CPU read done\n");\r
+}\r
+\r
+\r
+static void cpu_write(void)\r
+{\r
+ uint32_t adr, limit;\r
+\r
+ limit = 8 * 1024;\r
+ for (adr = 0; adr < limit; adr += 4)\r
+ {\r
+ *(uint32_t*) adr = 0xDEAD0000 + adr;\r
+ }\r
+\r
+ asm_wbinvd();\r
+\r
+ DPF(D_INFO, "CPU write done\n");\r
+}\r
+\r
+\r
+static void cpu_memory_test(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ uint32_t result = 0;\r
+ uint32_t val, dat, adr, adr0, step, limit;\r
+ uint64_t my_tsc;\r
+\r
+ ENTERFN();\r
+\r
+ asm_invd();\r
+\r
+ adr0 = 1 * 1024 * 1024;\r
+ limit = 256 * 1024 * 1024;\r
+\r
+ for (step = 0; step <= 4; step++)\r
+ {\r
+ DPF(D_INFO, "Mem test step %d starting from %xh\n", step, adr0);\r
+\r
+ my_tsc = read_tsc();\r
+ for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
+ {\r
+ if (step == 0) dat = adr;\r
+ else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
+ else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
+ else if (step == 3) dat = 0x5555AAAA;\r
+ else if (step == 4) dat = 0xAAAA5555;\r
+\r
+ *(uint32_t*) adr = dat;\r
+ }\r
+ DPF(D_INFO, "Write time %llXh\n", read_tsc() - my_tsc);\r
+\r
+ my_tsc = read_tsc();\r
+ for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
+ {\r
+ if (step == 0) dat = adr;\r
+ else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
+ else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
+ else if (step == 3) dat = 0x5555AAAA;\r
+ else if (step == 4) dat = 0xAAAA5555;\r
+\r
+ val = *(uint32_t*) adr;\r
+\r
+ if (val != dat)\r
+ {\r
+ DPF(D_INFO, "%x vs. %x@%x\n", dat, val, adr);\r
+ result = adr|BIT31;\r
+ }\r
+ }\r
+ DPF(D_INFO, "Read time %llXh\n", read_tsc() - my_tsc);\r
+ }\r
+\r
+ DPF( D_INFO, "Memory test result %x\n", result);\r
+ LEAVEFN();\r
+}\r
+#endif // MRC_SV\r
+\r
+\r
+// Execute memory test, if error dtected it is\r
+// indicated in mrc_params->status.\r
+static void memory_test(\r
+ MRCParams_t *mrc_params)\r
+{\r
+ uint32_t result = 0;\r
+\r
+ ENTERFN();\r
+\r
+ select_hte(mrc_params);\r
+ result = HteMemInit(mrc_params, MrcMemTest, MrcHaltHteEngineOnError);\r
+ select_memory_manager(mrc_params);\r
+\r
+ DPF(D_INFO, "Memory test result %x\n", result);\r
+ mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);\r
+ LEAVEFN();\r
+}\r
+\r
+\r
+// Force same timings as with backup settings\r
+static void static_timings(\r
+ MRCParams_t *mrc_params)\r
+\r
+{\r
+ uint8_t ch, rk, bl;\r
+\r
+ for (ch = 0; ch < NUM_CHANNELS; ch++)\r
+ {\r
+ for (rk = 0; rk < NUM_RANKS; rk++)\r
+ {\r
+ for (bl = 0; bl < NUM_BYTE_LANES; bl++)\r
+ {\r
+ set_rcvn(ch, rk, bl, 498); // RCVN\r
+ set_rdqs(ch, rk, bl, 24); // RDQS\r
+ set_wdqs(ch, rk, bl, 292); // WDQS\r
+ set_wdq( ch, rk, bl, 260); // WDQ\r
+ if (rk == 0)\r
+ {\r
+ set_vref(ch, bl, 32); // VREF (RANK0 only)\r
+ }\r
+ }\r
+ set_wctl(ch, rk, 217); // WCTL\r
+ }\r
+ set_wcmd(ch, 220); // WCMD\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+//\r
+// Initialise system memory.\r
+//\r
void MemInit(
    MRCParams_t *mrc_params)
{
  // Ordered table of initialisation steps. post_code packs major (hi byte)
  // and minor (lo byte); boot_path masks which boot modes run the step.
  static const MemInit_t init[] =
  {
    { 0x0101, bmCold|bmFast|bmWarm|bmS3, clear_self_refresh }, //0
    { 0x0200, bmCold|bmFast|bmWarm|bmS3, prog_ddr_timing_control }, //1 initialise the MCU
    { 0x0103, bmCold|bmFast , prog_decode_before_jedec }, //2
    { 0x0104, bmCold|bmFast , perform_ddr_reset }, //3
    { 0x0300, bmCold|bmFast |bmS3, ddrphy_init }, //4 initialise the DDRPHY
    { 0x0400, bmCold|bmFast , perform_jedec_init }, //5 perform JEDEC initialisation of DRAMs
    { 0x0105, bmCold|bmFast , set_ddr_init_complete }, //6
    { 0x0106, bmFast|bmWarm|bmS3, restore_timings }, //7
    { 0x0106, bmCold , default_timings }, //8
    { 0x0500, bmCold , rcvn_cal }, //9 perform RCVN_CAL algorithm
    { 0x0600, bmCold , wr_level }, //10 perform WR_LEVEL algorithm
    { 0x0120, bmCold , prog_page_ctrl }, //11
    { 0x0700, bmCold , rd_train }, //12 perform RD_TRAIN algorithm
    { 0x0800, bmCold , wr_train }, //13 perform WR_TRAIN algorithm
    { 0x010B, bmCold , store_timings }, //14
    { 0x010C, bmCold|bmFast|bmWarm|bmS3, enable_scrambling }, //15
    { 0x010D, bmCold|bmFast|bmWarm|bmS3, prog_ddr_control }, //16
    { 0x010E, bmCold|bmFast|bmWarm|bmS3, prog_dra_drb }, //17
    { 0x010F, bmWarm|bmS3, perform_wake }, //18
    { 0x0110, bmCold|bmFast|bmWarm|bmS3, change_refresh_period }, //19
    { 0x0111, bmCold|bmFast|bmWarm|bmS3, set_auto_refresh }, //20
    { 0x0112, bmCold|bmFast|bmWarm|bmS3, ecc_enable }, //21
    { 0x0113, bmCold|bmFast , memory_test }, //22
    { 0x0114, bmCold|bmFast|bmWarm|bmS3, lock_registers } //23 set init done
  };

  uint32_t i; // step index; NOTE: mutated by some debug-menu cases below

  ENTERFN();

  DPF(D_INFO, "Meminit build %s %s\n", __DATE__, __TIME__);

  // MRC started
  post_code(0x01, 0x00);

  // A stored-timings shortcut is only valid at the frequency it was
  // trained at; otherwise fall back to full cold-boot training.
  if (mrc_params->boot_mode != bmCold)
  {
    if (mrc_params->ddr_speed != mrc_params->timings.ddr_speed)
    {
      // full training required as frequency changed
      mrc_params->boot_mode = bmCold;
    }
  }

  for (i = 0; i < MCOUNT(init); i++)
  {
    uint64_t my_tsc; // per-step execution-time measurement

#ifdef MRC_SV
    // Interactive debug menu, entered after the training steps (i > 14).
    // Several cases rewrite "i" to jump within the init table.
    if (mrc_params->menu_after_mrc && i > 14)
    {
      uint8_t ch;

      mylop:

      DPF(D_INFO, "-- c - continue --\n");
      DPF(D_INFO, "-- j - move to jedec init --\n");
      DPF(D_INFO, "-- m - memory test --\n");
      DPF(D_INFO, "-- r - cpu read --\n");
      DPF(D_INFO, "-- w - cpu write --\n");
      DPF(D_INFO, "-- b - hte base test --\n");
      DPF(D_INFO, "-- g - hte extended test --\n");

      ch = mgetc();
      switch (ch)
      {
        case 'c':
          break;
        case 'j': //move to jedec init (step 5 in the table)
          i = 5;
          break;

        // 'M': repeated memory_test + HTE checks until a key is pressed;
        // 'N': HTE checks only, with progress printed every 256 iterations
        case 'M':
        case 'N':
        {
          uint32_t n, res, cnt=0;

          for(n=0; mgetch()==0; n++)
          {
            if( ch == 'M' || n % 256 == 0)
            {
              DPF(D_INFO, "n=%d e=%d\n", n, cnt);
            }

            res = 0;

            if( ch == 'M')
            {
              memory_test(mrc_params);
              res |= mrc_params->status;
            }

            mrc_params->hte_setup = 1;
            res |= check_bls_ex(mrc_params, 0x00000000);
            res |= check_bls_ex(mrc_params, 0x00000000);
            res |= check_bls_ex(mrc_params, 0x00000000);
            res |= check_bls_ex(mrc_params, 0x00000000);

            // repeat at 1GB offset when rank 1 is populated
            if( mrc_params->rank_enables & 2)
            {
              mrc_params->hte_setup = 1;
              res |= check_bls_ex(mrc_params, 0x40000000);
              res |= check_bls_ex(mrc_params, 0x40000000);
              res |= check_bls_ex(mrc_params, 0x40000000);
              res |= check_bls_ex(mrc_params, 0x40000000);
            }

            if( res != 0)
            {
              DPF(D_INFO, "###########\n");
              DPF(D_INFO, "#\n");
              DPF(D_INFO, "# Error count %d\n", ++cnt);
              DPF(D_INFO, "#\n");
              DPF(D_INFO, "###########\n");
            }

          } // for

          select_memory_manager(mrc_params);
        }
          goto mylop;
        case 'm':
          memory_test(mrc_params);
          goto mylop;
        case 'n':
          cpu_memory_test(mrc_params);
          goto mylop;

        case 'l': // toggle debug-log mask bits from the next key pressed
          ch = mgetc();
          if (ch <= '9') DpfPrintMask ^= (ch - '0') << 3;
          DPF(D_INFO, "Log mask %x\n", DpfPrintMask);
          goto mylop;
        case 'p':
          print_timings(mrc_params);
          goto mylop;
        case 'R':
          rd_train(mrc_params);
          goto mylop;
        case 'W':
          wr_train(mrc_params);
          goto mylop;

        case 'r':
          cpu_read();
          goto mylop;
        case 'w':
          cpu_write();
          goto mylop;

        case 'g':
        {
          uint32_t result;
          select_hte(mrc_params);
          mrc_params->hte_setup = 1;
          result = check_bls_ex(mrc_params, 0);
          DPF(D_INFO, "Extended test result %x\n", result);
          select_memory_manager(mrc_params);
        }
          goto mylop;
        case 'b':
        {
          uint32_t result;
          select_hte(mrc_params);
          mrc_params->hte_setup = 1;
          result = check_rw_coarse(mrc_params, 0);
          DPF(D_INFO, "Base test result %x\n", result);
          select_memory_manager(mrc_params);
        }
          goto mylop;
        case 'B':
          select_hte(mrc_params);
          HteMemOp(0x2340, 1, 1);
          select_memory_manager(mrc_params);
          goto mylop;

        // '3': exercise a suspend/resume cycle, then replay the init
        // sequence from step 0 in S3 mode (this iteration re-runs with i==0)
        case '3':
        {
          RegDPMC0 DPMC0reg;

          DPF( D_INFO, "===>> Start suspend\n");
          isbR32m(MCU, DSTAT);

          DPMC0reg.raw = isbR32m(MCU, DPMC0);
          DPMC0reg.field.DYNSREN = 0;
          DPMC0reg.field.powerModeOpCode = 0x05; // Disable Master DLL
          isbW32m(MCU, DPMC0, DPMC0reg.raw);

          // Should be off for negative test case verification
          #if 1
          Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),
              (uint32_t)SB_COMMAND(SB_SUSPEND_CMND_OPCODE, MCU, 0));
          #endif

          DPF( D_INFO, "press key\n");
          mgetc();
          DPF( D_INFO, "===>> Start resume\n");
          isbR32m(MCU, DSTAT);

          mrc_params->boot_mode = bmS3;
          i = 0;
        }

      } // switch

    } // if( menu
#endif //MRC_SV

    // run this step only if it applies to the current boot path
    if (mrc_params->boot_mode & init[i].boot_path)
    {
      uint8_t major = init[i].post_code >> 8 & 0xFF;
      uint8_t minor = init[i].post_code >> 0 & 0xFF;
      post_code(major, minor);

      my_tsc = read_tsc();
      init[i].init_fn(mrc_params);
      // NOTE(review): format string has no trailing '\n' -- confirm DPF adds one
      DPF(D_TIME, "Execution time %llX", read_tsc() - my_tsc);
    }
  }

  // display the timings
  print_timings(mrc_params);

  // MRC is complete.
  post_code(0x01, 0xFF);

  LEAVEFN();
  return;
}