1/************************************************************************\r
2 *\r
3 * Copyright (c) 2013-2015 Intel Corporation.\r
4 *\r
5* This program and the accompanying materials\r
6* are licensed and made available under the terms and conditions of the BSD License\r
7* which accompanies this distribution. The full text of the license may be found at\r
8* http://opensource.org/licenses/bsd-license.php\r
9*\r
10* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12 *\r
13 * This file contains all of the Cat Mountain Memory Reference Code (MRC).\r
14 *\r
15 * These functions are generic and should work for any Cat Mountain config.\r
16 *\r
17 * MRC requires two data structures to be passed in, which are initialised by "PreMemInit()".\r
18 *\r
19 * The basic flow is as follows:\r
20 * 01) Check for supported DDR speed configuration\r
21 * 02) Set up MEMORY_MANAGER buffer as pass-through (POR)\r
22 * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive setting possible\r
23 * 04) Set up the MCU logic\r
24 * 05) Set up the DDR_PHY logic\r
25 * 06) Initialise the DRAMs (JEDEC)\r
26 * 07) Perform the Receive Enable Calibration algorithm\r
27 * 08) Perform the Write Leveling algorithm\r
28 * 09) Perform the Read Training algorithm (includes internal Vref)\r
29 * 10) Perform the Write Training algorithm\r
30 * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings\r
31 *\r
32 * Dunit configuration based on Valleyview MRC.\r
33 *\r
34 ***************************************************************************/\r
35\r
36#include "mrc.h"\r
37#include "memory_options.h"\r
38\r
39#include "meminit.h"\r
40#include "meminit_utils.h"\r
41#include "hte.h"\r
42#include "io.h"\r
43\r
44// Override ODT to off state if requested\r
45#define DRMC_DEFAULT (mrc_params->rd_odt_value==0?BIT12:0)\r
46\r
47\r
48// tRFC values (in picoseconds) per density\r
49const uint32_t tRFC[5] =\r
50{\r
51 90000, // 512Mb\r
52 110000, // 1Gb\r
53 160000, // 2Gb\r
54 300000, // 4Gb\r
55 350000, // 8Gb\r
56 };\r
57\r
58// tCK clock period in picoseconds per speed index (0=800, 1=1066, 2=1333)\r
59const uint32_t tCK[3] =\r
60{\r
61 2500,\r
62 1875,\r
63 1500\r
64};\r
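// The conversions below assume MCEIL(ps, tck) is a round-up division from\r
// picoseconds to DRAM clocks, i.e. roughly ((ps) + (tck) - 1) / (tck).\r
// Example: a 2Gb part at 800 MT/s gives tRFC = MCEIL(160000, 2500) = 64 clocks\r
// and tWR = MCEIL(15000, 2500) = 6 clocks.\r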
65\r
66#ifdef SIM\r
67// Select static timings specific to simulation environment\r
68#define PLATFORM_ID 0\r
69#else\r
70// Select static timings specific to ClantonPeek platform\r
71#define PLATFORM_ID 1\r
72#endif\r
73\r
74\r
75// Global variables\r
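// Static delay tables below are indexed by PLATFORM_ID (0 = SIM, 1 = ClantonPeek).\r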
76const uint16_t ddr_wclk[] =\r
77 {193, 158};\r
78\r
79const uint16_t ddr_wctl[] =\r
80 { 1, 217};\r
81\r
82const uint16_t ddr_wcmd[] =\r
83 { 1, 220};\r
84\r
85\r
86#ifdef BACKUP_RCVN\r
87const uint16_t ddr_rcvn[] =\r
88 {129, 498};\r
89#endif // BACKUP_RCVN\r
90\r
91#ifdef BACKUP_WDQS\r
92const uint16_t ddr_wdqs[] =\r
93 { 65, 289};\r
94#endif // BACKUP_WDQS\r
95\r
96#ifdef BACKUP_RDQS\r
97const uint8_t ddr_rdqs[] =\r
98 { 32, 24};\r
99#endif // BACKUP_RDQS\r
100\r
101#ifdef BACKUP_WDQ\r
102const uint16_t ddr_wdq[] =\r
103 { 32, 257};\r
104#endif // BACKUP_WDQ\r
105\r
106\r
107\r
108// Select MEMORY_MANAGER as the source for PRI interface\r
109static void select_memory_manager(\r
110 MRCParams_t *mrc_params)\r
111{\r
112 RegDCO Dco;\r
113\r
114 ENTERFN();\r
115\r
116 Dco.raw = isbR32m(MCU, DCO);\r
117 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
118 isbW32m(MCU, DCO, Dco.raw);\r
119\r
120 LEAVEFN();\r
121}\r
122\r
123// Select HTE as the source for PRI interface\r
124void select_hte(\r
125 MRCParams_t *mrc_params)\r
126{\r
127 RegDCO Dco;\r
128\r
129 ENTERFN();\r
130\r
131 Dco.raw = isbR32m(MCU, DCO);\r
132 Dco.field.PMICTL = 1; //1 - PRI owned by HTE\r
133 isbW32m(MCU, DCO, Dco.raw);\r
134\r
135 LEAVEFN();\r
136}\r
137\r
138// Send DRAM command; data should be formatted\r
139// using a DCMD_Xxxx macro or an emrsXCommand structure.\r
140static void dram_init_command(\r
141 uint32_t data)\r
142{\r
143 Wr32(DCMD, 0, data);\r
144}\r
145\r
146// Send DRAM wake command using special MCU side-band WAKE opcode\r
147static void dram_wake_command(\r
148 void)\r
149{\r
150 ENTERFN();\r
151\r
152 Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),\r
153 (uint32_t) SB_COMMAND(SB_WAKE_CMND_OPCODE, MCU, 0));\r
154\r
155 LEAVEFN();\r
156}\r
157\r
158// Stop self refresh driven by MCU\r
159static void clear_self_refresh(\r
160 MRCParams_t *mrc_params)\r
161{\r
162 ENTERFN();\r
163\r
164 // clear the PMSTS Channel Self Refresh bits\r
165 isbM32m(MCU, PMSTS, BIT0, BIT0);\r
166\r
167 LEAVEFN();\r
168}\r
169\r
170// Configure MCU before jedec init sequence\r
171static void prog_decode_before_jedec(\r
172 MRCParams_t *mrc_params)\r
173{\r
174 RegDRP Drp;\r
175 RegDRCF Drfc;\r
176 RegDCAL Dcal;\r
177 RegDSCH Dsch;\r
178 RegDPMC0 Dpmc0;\r
179\r
180 ENTERFN();\r
181\r
182 // Disable power saving features\r
183 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
184 Dpmc0.field.CLKGTDIS = 1;\r
185 Dpmc0.field.DISPWRDN = 1;\r
186 Dpmc0.field.DYNSREN = 0;\r
187 Dpmc0.field.PCLSTO = 0;\r
188 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
189\r
190 // Disable out of order transactions\r
191 Dsch.raw = isbR32m(MCU, DSCH);\r
192 Dsch.field.OOODIS = 1;\r
193 Dsch.field.NEWBYPDIS = 1;\r
194 isbW32m(MCU, DSCH, Dsch.raw);\r
195\r
196 // Disable issuing the REF command\r
197 Drfc.raw = isbR32m(MCU, DRFC);\r
198 Drfc.field.tREFI = 0;\r
199 isbW32m(MCU, DRFC, Drfc.raw);\r
200\r
201 // Disable ZQ calibration short\r
202 Dcal.raw = isbR32m(MCU, DCAL);\r
203 Dcal.field.ZQCINT = 0;\r
204 Dcal.field.SRXZQCL = 0;\r
205 isbW32m(MCU, DCAL, Dcal.raw);\r
206\r
207 // Training is performed in address mode 0; rank population has limited impact, but the\r
208 // simulator complains if a non-existent rank is enabled.\r
209 Drp.raw = 0;\r
210 if (mrc_params->rank_enables & 1)\r
211 Drp.field.rank0Enabled = 1;\r
212 if (mrc_params->rank_enables & 2)\r
213 Drp.field.rank1Enabled = 1;\r
214 isbW32m(MCU, DRP, Drp.raw);\r
215\r
216 LEAVEFN();\r
217}\r
218\r
219// After Cold Reset, BIOS should set COLDWAKE bit to 1 before\r
220// sending the WAKE message to the Dunit.\r
221// For Standby Exit, or any other mode in which the DRAM is in\r
222// SR, this bit must be set to 0.\r
223static void perform_ddr_reset(\r
224 MRCParams_t *mrc_params)\r
225{\r
226 ENTERFN();\r
227\r
228 // Set COLDWAKE bit before sending the WAKE message\r
229 isbM32m(MCU, DRMC, BIT16, BIT16);\r
230\r
231 // Send wake command to DUNIT (MUST be done before JEDEC)\r
232 dram_wake_command();\r
233\r
234 // Set default value\r
235 isbW32m(MCU, DRMC, DRMC_DEFAULT);\r
236\r
237 LEAVEFN();\r
238}\r
239\r
240// Dunit Initialisation Complete.\r
241// Indicates that initialisation of the Dunit has completed.\r
242// Memory accesses are permitted and maintenance operation\r
243// begins. Until this bit is set to a 1, the memory controller will\r
244// not accept DRAM requests from the MEMORY_MANAGER or HTE.\r
245static void set_ddr_init_complete(\r
246 MRCParams_t *mrc_params)\r
247{\r
248 RegDCO Dco;\r
249\r
250 ENTERFN();\r
251\r
252 Dco.raw = isbR32m(MCU, DCO);\r
253 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
254 Dco.field.IC = 1; //1 - initialisation complete\r
255 isbW32m(MCU, DCO, Dco.raw);\r
256\r
257 LEAVEFN();\r
258}\r
259\r
260static void prog_page_ctrl(\r
261 MRCParams_t *mrc_params)\r
262{\r
263 RegDPMC0 Dpmc0;\r
264\r
265 ENTERFN();\r
266\r
267 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
268\r
269 Dpmc0.field.PCLSTO = 0x4;\r
270 Dpmc0.field.PREAPWDEN = 1;\r
271\r
272 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
273}\r
274\r
275// Configure MCU Power Management Control Register\r
276// and Scheduler Control Register.\r
277static void prog_ddr_control(\r
278 MRCParams_t *mrc_params)\r
279{\r
280 RegDSCH Dsch;\r
281 RegDPMC0 Dpmc0;\r
282\r
283 ENTERFN();\r
284\r
285 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
286 Dsch.raw = isbR32m(MCU, DSCH);\r
287\r
288 Dpmc0.field.DISPWRDN = mrc_params->power_down_disable;\r
289 Dpmc0.field.CLKGTDIS = 0;\r
290 Dpmc0.field.PCLSTO = 4;\r
291 Dpmc0.field.PREAPWDEN = 1;\r
292\r
293 Dsch.field.OOODIS = 0;\r
294 Dsch.field.OOOST3DIS = 0;\r
295 Dsch.field.NEWBYPDIS = 0;\r
296\r
297 isbW32m(MCU, DSCH, Dsch.raw);\r
298 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
299\r
300 // CMDTRIST = 2h - CMD/ADDR are tristated when no valid command\r
301 isbM32m(MCU, DPMC1, 2 << 4, BIT5|BIT4);\r
302\r
303 LEAVEFN();\r
304}\r
305\r
306// After training is complete, configure the MCU Rank Population Register\r
307// specifying: ranks enabled, device width, density, address mode.\r
308static void prog_dra_drb(\r
309 MRCParams_t *mrc_params)\r
310{\r
311 RegDRP Drp;\r
312 RegDCO Dco;\r
313\r
314 ENTERFN();\r
315\r
316 Dco.raw = isbR32m(MCU, DCO);\r
317 Dco.field.IC = 0;\r
318 isbW32m(MCU, DCO, Dco.raw);\r
319\r
320 Drp.raw = 0;\r
321 if (mrc_params->rank_enables & 1)\r
322 Drp.field.rank0Enabled = 1;\r
323 if (mrc_params->rank_enables & 2)\r
324 Drp.field.rank1Enabled = 1;\r
325 if (mrc_params->dram_width == x16)\r
326 {\r
327 Drp.field.dimm0DevWidth = 1;\r
328 Drp.field.dimm1DevWidth = 1;\r
329 }\r
330 // Density encoding in DRAMParams_t (0=512Mb, 1=1Gb, 2=2Gb, 3=4Gb)\r
331 // has to be mapped to the RANKDENSx encoding (0=1Gb)\r
332 Drp.field.dimm0DevDensity = mrc_params->params.DENSITY - 1;\r
333 Drp.field.dimm1DevDensity = mrc_params->params.DENSITY - 1;\r
334\r
335 // Address mode can be overridden if ECC is enabled\r
336 Drp.field.addressMap = mrc_params->address_mode;\r
337\r
338 isbW32m(MCU, DRP, Drp.raw);\r
339\r
340 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
341 Dco.field.IC = 1; //1 - initialisation complete\r
342 isbW32m(MCU, DCO, Dco.raw);\r
343\r
344 LEAVEFN();\r
345}\r
346\r
347// Configure refresh rate and short ZQ calibration interval.\r
348// Activate dynamic self refresh.\r
349static void change_refresh_period(\r
350 MRCParams_t *mrc_params)\r
351{\r
352 RegDRCF Drfc;\r
353 RegDCAL Dcal;\r
354 RegDPMC0 Dpmc0;\r
355\r
356 ENTERFN();\r
357\r
358 Drfc.raw = isbR32m(MCU, DRFC);\r
359 Drfc.field.tREFI = mrc_params->refresh_rate;\r
360 Drfc.field.REFDBTCLR = 1;\r
361 isbW32m(MCU, DRFC, Drfc.raw);\r
362\r
363 Dcal.raw = isbR32m(MCU, DCAL);\r
364 Dcal.field.ZQCINT = 3; // 63ms\r
365 isbW32m(MCU, DCAL, Dcal.raw);\r
366\r
367 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
368 Dpmc0.field.ENPHYCLKGATE = 1;\r
369 Dpmc0.field.DYNSREN = 1;\r
370 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
371\r
372 LEAVEFN();\r
373}\r
374\r
375// Send DRAM wake command\r
376static void perform_wake(\r
377 MRCParams_t *mrc_params)\r
378{\r
379 ENTERFN();\r
380\r
381 dram_wake_command();\r
382\r
383 LEAVEFN();\r
384}\r
385\r
386// prog_ddr_timing_control (aka mcu_init):\r
387// POST_CODE[major] == 0x02\r
388//\r
389// This function initialises the timing registers in the MCU (DTR0..DTR4).\r
390static void prog_ddr_timing_control(\r
391 MRCParams_t *mrc_params)\r
392{\r
393 uint8_t TCL, WL;\r
394 uint8_t TRP, TRCD, TRAS, TRFC, TWR, TWTR, TRRD, TRTP, TFAW;\r
395 uint32_t TCK;\r
396\r
397 RegDTR0 Dtr0;\r
398 RegDTR1 Dtr1;\r
399 RegDTR2 Dtr2;\r
400 RegDTR3 Dtr3;\r
401 RegDTR4 Dtr4;\r
402\r
403 ENTERFN();\r
404\r
405 // mcu_init starts\r
406 post_code(0x02, 0x00);\r
407\r
408 Dtr0.raw = isbR32m(MCU, DTR0);\r
409 Dtr1.raw = isbR32m(MCU, DTR1);\r
410 Dtr2.raw = isbR32m(MCU, DTR2);\r
411 Dtr3.raw = isbR32m(MCU, DTR3);\r
412 Dtr4.raw = isbR32m(MCU, DTR4);\r
413\r
414 TCK = tCK[mrc_params->ddr_speed]; // Clock in picoseconds\r
415 TCL = mrc_params->params.tCL; // CAS latency in clocks\r
416 TRP = TCL; // Per CAT MRC\r
417 TRCD = TCL; // Per CAT MRC\r
418 TRAS = MCEIL(mrc_params->params.tRAS, TCK);\r
419 TRFC = MCEIL(tRFC[mrc_params->params.DENSITY], TCK);\r
420 TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600\r
421\r
422 TWTR = MCEIL(mrc_params->params.tWTR, TCK);\r
423 TRRD = MCEIL(mrc_params->params.tRRD, TCK);\r
424 TRTP = 4; // Valid for 800 and 1066, use 5 for 1333\r
425 TFAW = MCEIL(mrc_params->params.tFAW, TCK);\r
426\r
427 WL = 5 + mrc_params->ddr_speed;\r
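  // WL = DDR3 CAS write latency: 5/6/7 clocks at 800/1066/1333.\r
  // The DTRx bit-fields below are stored with fixed encoding offsets\r
  // (e.g. the tCL field holds TCL - 5), so a hypothetical TCL of 6 is programmed as 1.\r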
428\r
429 Dtr0.field.dramFrequency = mrc_params->ddr_speed;\r
430\r
431 Dtr0.field.tCL = TCL - 5; //Convert from TCL (DRAM clocks) to VLV index\r
432 Dtr0.field.tRP = TRP - 5; //5 bit DRAM Clock\r
433 Dtr0.field.tRCD = TRCD - 5; //5 bit DRAM Clock\r
434\r
435 Dtr1.field.tWCL = WL - 3; //Convert from WL (DRAM clocks) to VLV index\r
436 Dtr1.field.tWTP = WL + 4 + TWR - 14; //Change to tWTP\r
437 Dtr1.field.tRTP = MMAX(TRTP, 4) - 3; //4 bit DRAM Clock\r
438 Dtr1.field.tRRD = TRRD - 4; //4 bit DRAM Clock\r
439 Dtr1.field.tCMD = 1; //2N\r
440 Dtr1.field.tRAS = TRAS - 14; //6 bit DRAM Clock\r
441\r
442 Dtr1.field.tFAW = ((TFAW + 1) >> 1) - 5; //4 bit DRAM Clock\r
443 Dtr1.field.tCCD = 0; //Set 4 Clock CAS to CAS delay (multi-burst)\r
444 Dtr2.field.tRRDR = 1;\r
445 Dtr2.field.tWWDR = 2;\r
446 Dtr2.field.tRWDR = 2;\r
447 Dtr3.field.tWRDR = 2;\r
448 Dtr3.field.tWRDD = 2;\r
449\r
450 if (mrc_params->ddr_speed == DDRFREQ_800)\r
451 {\r
452 // Extended RW delay (+1)\r
453 Dtr3.field.tRWSR = TCL - 5 + 1;\r
454 }\r
455 else if(mrc_params->ddr_speed == DDRFREQ_1066)\r
456 {\r
457 // Extended RW delay (+1)\r
458 Dtr3.field.tRWSR = TCL - 5 + 1;\r
459 }\r
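  // Note: the 800 and 1066 branches above currently program the same tRWSR value.\r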
460\r
461 Dtr3.field.tWRSR = 4 + WL + TWTR - 11;\r
462\r
463 if (mrc_params->ddr_speed == DDRFREQ_800)\r
464 {\r
465 Dtr3.field.tXP = MMAX(0, 1 - Dtr1.field.tCMD);\r
466 }\r
467 else\r
468 {\r
469 Dtr3.field.tXP = MMAX(0, 2 - Dtr1.field.tCMD);\r
470 }\r
471\r
472 Dtr4.field.WRODTSTRT = Dtr1.field.tCMD;\r
473 Dtr4.field.WRODTSTOP = Dtr1.field.tCMD;\r
474 Dtr4.field.RDODTSTRT = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2; //Convert from WL (DRAM clocks) to VLV index\r
475 Dtr4.field.RDODTSTOP = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2;\r
476 Dtr4.field.TRGSTRDIS = 0;\r
477 Dtr4.field.ODTDIS = 0;\r
478\r
479 isbW32m(MCU, DTR0, Dtr0.raw);\r
480 isbW32m(MCU, DTR1, Dtr1.raw);\r
481 isbW32m(MCU, DTR2, Dtr2.raw);\r
482 isbW32m(MCU, DTR3, Dtr3.raw);\r
483 isbW32m(MCU, DTR4, Dtr4.raw);\r
484\r
485 LEAVEFN();\r
486}\r
487\r
488// ddrphy_init:\r
489// POST_CODE[major] == 0x03\r
490//\r
491// This function performs some initialisation on the DDRIO unit.\r
492// This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.\r
493static void ddrphy_init(MRCParams_t *mrc_params)\r
494{\r
495 uint32_t tempD; // temporary DWORD\r
496 uint8_t channel_i; // channel counter\r
497 uint8_t rank_i; // rank counter\r
498 uint8_t bl_grp_i; // byte lane group counter (2 BLs per module)\r
499\r
500 uint8_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1; // byte lane divisor\r
501 uint8_t speed = mrc_params->ddr_speed & (BIT1|BIT0); // For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333\r
502 uint8_t tCAS;\r
503 uint8_t tCWL;\r
504\r
505 ENTERFN();\r
506\r
507 tCAS = mrc_params->params.tCL;\r
508 tCWL = 5 + mrc_params->ddr_speed;\r
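  // DDR3 CAS write latency: 5, 6 or 7 clocks for 800, 1066 and 1333 respectively.\r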
509\r
510 // ddrphy_init starts\r
511 post_code(0x03, 0x00);\r
512\r
513 // HSD#231531\r
514 // Make sure IOBUFACT is deasserted before initialising the DDR PHY.\r
515 // HSD#234845\r
516 // Make sure WRPTRENABLE is deasserted before initialising the DDR PHY.\r
517 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
518 if (mrc_params->channel_enables & (1<<channel_i)) {\r
519 // Deassert DDRPHY Initialisation Complete\r
520 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT20, BIT20); // SPID_INIT_COMPLETE=0\r
521 // Deassert IOBUFACT\r
522 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT2, BIT2); // IOBUFACTRST_N=0\r
523 // Disable WRPTR\r
524 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT0, BIT0); // WRPTRENABLE=0\r
525 } // if channel enabled\r
526 } // channel_i loop\r
527\r
528 // Put PHY in reset\r
529 isbM32m(DDRPHY, MASTERRSTN, 0, BIT0); // PHYRSTN=0\r
530\r
531 // Initialise DQ01,DQ23,CMD,CLK-CTL,COMP modules\r
532 // STEP0:\r
533 post_code(0x03, 0x10);\r
534 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
535 if (mrc_params->channel_enables & (1<<channel_i)) {\r
536\r
537 // DQ01-DQ23\r
538 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
539 isbM32m(DDRPHY, (DQOBSCKEBBCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i) ? (0x00) : (BIT22)), (BIT22)); // Analog MUX select - IO2xCLKSEL\r
540\r
541 // ODT Strength\r
542 switch (mrc_params->rd_odt_value) {\r
543 case 1: tempD = 0x3; break; // 60 ohm\r
544 case 2: tempD = 0x3; break; // 120 ohm\r
545 case 3: tempD = 0x3; break; // 180 ohm\r
546 default: tempD = 0x3; break; // 120 ohm\r
547 }\r
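  // Note: every rd_odt_value case above currently resolves to the same code (0x3).\r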
548 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
549 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
550 // Dynamic ODT/DIFFAMP\r
551 tempD = (((tCAS)<<24)|((tCAS)<<16)|((tCAS)<<8)|((tCAS)<<0));\r
552 switch (speed) {\r
553 case 0: tempD -= 0x01010101; break; // 800\r
554 case 1: tempD -= 0x02020202; break; // 1066\r
555 case 2: tempD -= 0x03030303; break; // 1333\r
556 case 3: tempD -= 0x04040404; break; // 1600\r
557 }\r
558 isbM32m(DDRPHY, (B01LATCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // Launch Time: ODT, DIFFAMP, ODT, DIFFAMP\r
559 switch (speed) {\r
560 // HSD#234715\r
561 case 0: tempD = ((0x06<<16)|(0x07<<8)); break; // 800\r
562 case 1: tempD = ((0x07<<16)|(0x08<<8)); break; // 1066\r
563 case 2: tempD = ((0x09<<16)|(0x0A<<8)); break; // 1333\r
564 case 3: tempD = ((0x0A<<16)|(0x0B<<8)); break; // 1600\r
565 }\r
566 isbM32m(DDRPHY, (B0ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
567 isbM32m(DDRPHY, (B1ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
568\r
569 switch (mrc_params->rd_odt_value) {\r
570 case 0: tempD = ((0x3F<<16)|(0x3f<<10)); break; // override DIFFAMP=on, ODT=off\r
571 default: tempD = ((0x3F<<16)|(0x2A<<10)); break; // override DIFFAMP=on, ODT=on\r
572 }\r
573 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
574 isbM32m(DDRPHY, (B1OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
575\r
576 // DLL Setup\r
577 // 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO)\r
578 isbM32m(DDRPHY, (B0LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
579 isbM32m(DDRPHY, (B1LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
580\r
581 // RCVEN Bypass (PO)\r
582 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
583 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
584 // TX\r
585 isbM32m(DDRPHY, (DQCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT16), (BIT16)); // 0 means driving DQ during DQS-preamble\r
586 isbM32m(DDRPHY, (B01PTRCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT8), (BIT8)); // WR_LVL mode disable\r
587 // RX (PO)\r
588 isbM32m(DDRPHY, (B0VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
589 isbM32m(DDRPHY, (B1VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
590 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
591 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
592 }\r
593 // CLKEBB\r
594 isbM32m(DDRPHY, (CMDOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT23));\r
595\r
596 // Enable tristate control of cmd/address bus\r
597 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT1|BIT0));\r
598\r
599 // ODT RCOMP\r
600 isbM32m(DDRPHY, (CMDRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<5)|(0x03<<0)), ((BIT9|BIT8|BIT7|BIT6|BIT5)|(BIT4|BIT3|BIT2|BIT1|BIT0)));\r
601\r
602 // CMDPM* registers must be programmed in this order...\r
603 isbM32m(DDRPHY, (CMDPMDLYREG4 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFFFU<<16)|(0xFFFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: SFR (regulator), MPLL\r
604 isbM32m(DDRPHY, (CMDPMDLYREG3 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFU<<28)|(0xFFF<<16)|(0xF<<12)|(0x616<<0)), ((BIT31|BIT30|BIT29|BIT28)|(BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3, VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT_for_PM_MSG_gt0, MDLL Turn On\r
605 isbM32m(DDRPHY, (CMDPMDLYREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // MPLL Divider Reset Delays\r
606 isbM32m(DDRPHY, (CMDPMDLYREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn Off Delays: VREG, Staggered MDLL, MDLL, PI\r
607 isbM32m(DDRPHY, (CMDPMDLYREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT\r
608 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x6<<8)|BIT6|(0x4<<0)), (BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|(BIT11|BIT10|BIT9|BIT8)|BIT6|(BIT3|BIT2|BIT1|BIT0))); // Allow PUnit signals\r
609 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
610 // CLK-CTL\r
611 isbM32m(DDRPHY, (CCOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT24)); // CLKEBB\r
612 isbM32m(DDRPHY, (CCCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x0<<16)|(0x0<<12)|(0x0<<8)|(0xF<<4)|BIT0), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|BIT0)); // Buffer Enable: CS,CKE,ODT,CLK\r
613 isbM32m(DDRPHY, (CCRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT RCOMP\r
614 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
615\r
616 // COMP (RON channel specific)\r
617 // - DQ/DQS/DM RON: 32 Ohm\r
618 // - CTRL/CMD RON: 27 Ohm\r
619 // - CLK RON: 26 Ohm\r
620 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
621 isbM32m(DDRPHY, (CMDVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
622 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0F<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
623 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
624 isbM32m(DDRPHY, (CTLVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
625\r
626 // DQS Swapped Input Enable\r
627 isbM32m(DDRPHY, (COMPEN1CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT19|BIT17), ((BIT31|BIT30)|BIT19|BIT17|(BIT15|BIT14)));\r
628\r
629 // ODT VREF = 1.5 x 274/(360+274) = ~0.65V (code of ~50)\r
630 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
631 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
632 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0E<<8)|(0x05<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
633\r
634 // Slew rate settings are frequency specific; the numbers below are for 800MHz (speed == 0)\r
635 // - DQ/DQS/DM/CLK SR: 4V/ns,\r
636 // - CTRL/CMD SR: 1.5V/ns\r
637 tempD = (0x0E<<16)|(0x0E<<12)|(0x08<<8)|(0x0B<<4)|(0x0B<<0);\r
638 isbM32m(DDRPHY, (DLYSELCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (tempD), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ\r
639 isbM32m(DDRPHY, (TCOVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x05<<16)|(0x05<<8)|(0x05<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // TCO Vref CLK,DQS,DQ\r
640 isbM32m(DDRPHY, (CCBUFODTCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODTCOMP CMD/CTL PU/PD\r
641 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (0), ((BIT31|BIT30)|BIT8)); // COMP\r
642\r
643 #ifdef BACKUP_COMPS\r
644 // DQ COMP Overrides\r
645 isbM32m(DDRPHY, (DQDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
646 isbM32m(DDRPHY, (DQDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
647 isbM32m(DDRPHY, (DQDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
648 isbM32m(DDRPHY, (DQDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
649 isbM32m(DDRPHY, (DQODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
650 isbM32m(DDRPHY, (DQODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
651 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
652 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
653 // DQS COMP Overrides\r
654 isbM32m(DDRPHY, (DQSDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
655 isbM32m(DDRPHY, (DQSDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
656 isbM32m(DDRPHY, (DQSDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
657 isbM32m(DDRPHY, (DQSDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
658 isbM32m(DDRPHY, (DQSODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
659 isbM32m(DDRPHY, (DQSODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
660 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
661 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
662 // CLK COMP Overrides\r
663 isbM32m(DDRPHY, (CLKDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
664 isbM32m(DDRPHY, (CLKDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
665 isbM32m(DDRPHY, (CLKDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
666 isbM32m(DDRPHY, (CLKDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
667 isbM32m(DDRPHY, (CLKODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
668 isbM32m(DDRPHY, (CLKODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
669 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
670 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
671 // CMD COMP Overrides\r
672 isbM32m(DDRPHY, (CMDDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
673 isbM32m(DDRPHY, (CMDDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
674 isbM32m(DDRPHY, (CMDDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
675 isbM32m(DDRPHY, (CMDDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
676 // CTL COMP Overrides\r
677 isbM32m(DDRPHY, (CTLDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
678 isbM32m(DDRPHY, (CTLDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
679 isbM32m(DDRPHY, (CTLDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
680 isbM32m(DDRPHY, (CTLDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
681 #else\r
682 // DQ TCOCOMP Overrides\r
683 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
684 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
685 // DQS TCOCOMP Overrides\r
686 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
687 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
688 // CLK TCOCOMP Overrides\r
689 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
690 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
691 #endif // BACKUP_COMPS\r
692 // program STATIC delays\r
693 #ifdef BACKUP_WCMD\r
694 set_wcmd(channel_i, ddr_wcmd[PLATFORM_ID]);\r
695 #else\r
696 set_wcmd(channel_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
697 #endif // BACKUP_WCMD\r
698 for (rank_i=0; rank_i<NUM_RANKS; rank_i++) {\r
699 if (mrc_params->rank_enables & (1<<rank_i)) {\r
700 set_wclk(channel_i, rank_i, ddr_wclk[PLATFORM_ID]);\r
701 #ifdef BACKUP_WCTL\r
702 set_wctl(channel_i, rank_i, ddr_wctl[PLATFORM_ID]);\r
703 #else\r
704 set_wctl(channel_i, rank_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
705 #endif // BACKUP_WCTL\r
706 }\r
707 }\r
708 }\r
709 }\r
710 // COMP (non channel specific)\r
711 //isbM32m(DDRPHY, (), (), ());\r
712 isbM32m(DDRPHY, (DQANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
713 isbM32m(DDRPHY, (DQANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
714 isbM32m(DDRPHY, (CMDANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
715 isbM32m(DDRPHY, (CMDANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
716 isbM32m(DDRPHY, (CLKANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
717 isbM32m(DDRPHY, (CLKANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
718 isbM32m(DDRPHY, (DQSANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
719 isbM32m(DDRPHY, (DQSANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
720 isbM32m(DDRPHY, (CTLANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
721 isbM32m(DDRPHY, (CTLANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
722 isbM32m(DDRPHY, (DQANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
723 isbM32m(DDRPHY, (DQANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
724 isbM32m(DDRPHY, (CLKANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
725 isbM32m(DDRPHY, (CLKANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
726 isbM32m(DDRPHY, (DQSANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
727 isbM32m(DDRPHY, (DQSANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
728 isbM32m(DDRPHY, (DQANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
729 isbM32m(DDRPHY, (DQANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
730 isbM32m(DDRPHY, (CMDANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
731 isbM32m(DDRPHY, (CMDANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
732 isbM32m(DDRPHY, (CLKANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
733 isbM32m(DDRPHY, (CLKANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
734 isbM32m(DDRPHY, (DQSANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
735 isbM32m(DDRPHY, (DQSANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
736 isbM32m(DDRPHY, (CTLANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
737 isbM32m(DDRPHY, (CTLANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
738 isbM32m(DDRPHY, (DQANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
739 isbM32m(DDRPHY, (DQANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
740 isbM32m(DDRPHY, (CLKANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
741 isbM32m(DDRPHY, (CLKANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
742 isbM32m(DDRPHY, (DQSANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
743 isbM32m(DDRPHY, (DQSANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
744 isbM32m(DDRPHY, (TCOCNTCTRL), (0x1<<0), (BIT1|BIT0)); // TCOCOMP: Pulse Count\r
745 isbM32m(DDRPHY, (CHNLBUFSTATIC), ((0x03<<24)|(0x03<<16)), ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODT: CMD/CTL PD/PU\r
746 isbM32m(DDRPHY, (MSCNTR), (0x64<<0), (BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0)); // Set 1us counter\r
747 isbM32m(DDRPHY, (LATCH1CTL), (0x1<<28), (BIT30|BIT29|BIT28)); // ???\r
748\r
749 // Release PHY from reset\r
750 isbM32m(DDRPHY, MASTERRSTN, BIT0, BIT0); // PHYRSTN=1\r
751\r
752 // STEP1:\r
753 post_code(0x03, 0x11);\r
754 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
755 if (mrc_params->channel_enables & (1<<channel_i)) {\r
756 // DQ01-DQ23\r
757 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
758 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
759 delay_n(3);\r
760 }\r
761 // ECC\r
762 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT13), (BIT13)); // Enable VREG\r
763 delay_n(3);\r
764 // CMD\r
765 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
766 delay_n(3);\r
767 // CLK-CTL\r
768 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
769 delay_n(3);\r
770 }\r
771 }\r
772\r
773 // STEP2:\r
774 post_code(0x03, 0x12);\r
775 delay_n(200);\r
776 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
777 if (mrc_params->channel_enables & (1<<channel_i)) {\r
778 // DQ01-DQ23\r
779 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
780 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT17), (BIT17)); // Enable MCDLL\r
781 delay_n(50);\r
782 }\r
783 // ECC\r
784 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT17), (BIT17)); // Enable MCDLL\r
785 delay_n(50);\r
786 // CMD\r
787 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
788 delay_n(50);\r
789 // CLK-CTL\r
790 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
791 delay_n(50);\r
792 }\r
793 }\r
794\r
795 // STEP3:\r
796 post_code(0x03, 0x13);\r
797 delay_n(100);\r
798 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
799 if (mrc_params->channel_enables & (1<<channel_i)) {\r
800 // DQ01-DQ23\r
801 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
802#ifdef FORCE_16BIT_DDRIO\r
803 tempD = ((bl_grp_i) && (mrc_params->channel_width == x16)) ? ((0x1<<12)|(0x1<<8)|(0xF<<4)|(0xF<<0)) : ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
804#else\r
805 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
806#endif\r
807 isbM32m(DDRPHY, (DQDLLTXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
808 delay_n(3);\r
809 isbM32m(DDRPHY, (DQDLLRXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL\r
810 delay_n(3);\r
811 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL Overrides BL0\r
812 }\r
813\r
814 // ECC\r
815 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
816 isbM32m(DDRPHY, (ECCDLLTXCTL), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
817 delay_n(3);\r
818\r
819 // CMD (PO)\r
820 isbM32m(DDRPHY, (CMDDLLTXCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0)), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
821 delay_n(3);\r
822 }\r
823 }\r
824\r
825\r
826 // STEP4:\r
827 post_code(0x03, 0x14);\r
828 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
829 if (mrc_params->channel_enables & (1<<channel_i)) {\r
830 // Host To Memory Clock Alignment (HMC) for 800/1066\r
831 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
832 isbM32m(DDRPHY, (DQCLKALIGNREG2 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i)?(0x3):(0x1)), (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
833 }\r
834 isbM32m(DDRPHY, (ECCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
835 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x0, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
836 isbM32m(DDRPHY, (CCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
837 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), (0x2<<4), (BIT5|BIT4)); // CLK_ALIGN_MODE\r
838 isbM32m(DDRPHY, (CMDCLKALIGNREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x18<<16)|(0x10<<8)|(0x8<<2)|(0x1<<0)), ((BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|(BIT1|BIT0))); // NUM_SAMPLES, MAX_SAMPLES, MACRO_PI_STEP, MICRO_PI_STEP\r
839 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x10<<16)|(0x4<<8)|(0x2<<4)), ((BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4))); // ???, TOTAL_NUM_MODULES, FIRST_U_PARTITION\r
840 #ifdef HMC_TEST\r
841 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT24, BIT24); // START_CLK_ALIGN=1\r
842 while (isbR32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET))) & BIT24); // wait for START_CLK_ALIGN=0\r
843 #endif // HMC_TEST\r
844\r
845 // Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN\r
846 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), BIT0, BIT0); // WRPTRENABLE=1\r
847\r
848\r
849#ifdef SIM\r
850 // COMP is not functional on the simulator\r
851#else\r
852 // COMP initial\r
853 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), BIT5, BIT5); // enable bypass for CLK buffer (PO)\r
854 isbM32m(DDRPHY, (CMPCTRL), (BIT0), (BIT0)); // Initial COMP Enable\r
855 while (isbR32m(DDRPHY, (CMPCTRL)) & BIT0); // wait for Initial COMP Enable = 0\r
856 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), ~BIT5, BIT5); // disable bypass for CLK buffer (PO)\r
857#endif\r
858\r
859 // IOBUFACT\r
860 // STEP4a\r
861 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT2, BIT2); // IOBUFACTRST_N=1\r
862\r
863 // DDRPHY initialisation complete\r
864 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT20, BIT20); // SPID_INIT_COMPLETE=1\r
865 }\r
866 }\r
867\r
868 LEAVEFN();\r
869 return;\r
870}\r
871\r
872// jedec_init (aka PerformJedecInit):\r
873// This function performs JEDEC initialisation on all enabled channels.\r
874static void jedec_init(\r
875 MRCParams_t *mrc_params,\r
876 uint32_t silent)\r
877{\r
878 uint8_t TWR, WL, Rank;\r
879 uint32_t TCK;\r
880\r
881 RegDTR0 DTR0reg;\r
882\r
883 DramInitDDR3MRS0 mrs0Command;\r
884 DramInitDDR3EMR1 emrs1Command;\r
885 DramInitDDR3EMR2 emrs2Command;\r
886 DramInitDDR3EMR3 emrs3Command;\r
887\r
888 ENTERFN();\r
889\r
890 // jedec_init starts\r
891 if (!silent)\r
892 {\r
893 post_code(0x04, 0x00);\r
894 }\r
895\r
896 // Assert RESET# for 200us\r
897 isbM32m(DDRPHY, CCDDR3RESETCTL, BIT1, (BIT8|BIT1)); // DDR3_RESET_SET=0, DDR3_RESET_RESET=1\r
898#ifdef QUICKSIM\r
899 // Don't waste time during simulation\r
900 delay_u(2);\r
901#else\r
902 delay_u(200);\r
903#endif\r
904 isbM32m(DDRPHY, CCDDR3RESETCTL, BIT8, (BIT8|BIT1)); // DDR3_RESET_SET=1, DDR3_RESET_RESET=0\r
905\r
906 DTR0reg.raw = isbR32m(MCU, DTR0);\r
907\r
908 // Set CKEVAL for populated ranks\r
909 // then send NOP to each rank (#4550197)\r
910 {\r
911 uint32_t DRPbuffer;\r
912 uint32_t DRMCbuffer;\r
913\r
914 DRPbuffer = isbR32m(MCU, DRP);\r
915 DRPbuffer &= 0x3;\r
916 DRMCbuffer = isbR32m(MCU, DRMC);\r
917 DRMCbuffer &= 0xFFFFFFFC;\r
918 DRMCbuffer |= (BIT4 | DRPbuffer);\r
919\r
920 isbW32m(MCU, DRMC, DRMCbuffer);\r
921\r
922 for (Rank = 0; Rank < NUM_RANKS; Rank++)\r
923 {\r
924 // Skip to next populated rank\r
925 if ((mrc_params->rank_enables & (1 << Rank)) == 0)\r
926 {\r
927 continue;\r
928 }\r
929\r
930 dram_init_command(DCMD_NOP(Rank));\r
931 }\r
932\r
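  // Restore the default DRMC value now that a NOP has been issued to each populated rank.\r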
933 isbW32m(MCU, DRMC, DRMC_DEFAULT);\r
934 }\r
935\r
936 // setup for emrs 2\r
937 // BIT[15:11] --> Always "0"\r
938 // BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)\r
939 // BIT[08] --> Always "0"\r
940 // BIT[07] --> SRT: use sr_temp_range\r
941 // BIT[06] --> ASR: want "Manual SR Reference" (0)\r
942 // BIT[05:03] --> CWL: use oem_tCWL\r
943 // BIT[02:00] --> PASR: want "Full Array" (0)\r
944 emrs2Command.raw = 0;\r
945 emrs2Command.field.bankAddress = 2;\r
946\r
947 WL = 5 + mrc_params->ddr_speed;\r
948 emrs2Command.field.CWL = WL - 5;\r
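  // WL - 5 matches the JEDEC MR2 CWL encoding (0 = 5CK, 1 = 6CK, 2 = 7CK),\r
  // so the programmed field value equals the ddr_speed index.\r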
949 emrs2Command.field.SRT = mrc_params->sr_temp_range;\r
950\r
951 // setup for emrs 3\r
952 // BIT[15:03] --> Always "0"\r
953 // BIT[02] --> MPR: want "Normal Operation" (0)\r
954 // BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)\r
955 emrs3Command.raw = 0;\r
956 emrs3Command.field.bankAddress = 3;\r
957\r
958 // setup for emrs 1\r
959 // BIT[15:13] --> Always "0"\r
960 // BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)\r
961 // BIT[11:11] --> TDQS: want "Disabled" (0)\r
962 // BIT[10:10] --> Always "0"\r
963 // BIT[09,06,02] --> Rtt_nom: use rtt_nom_value\r
964 // BIT[08] --> Always "0"\r
965 // BIT[07] --> WR_LVL: want "Disabled" (0)\r
966 // BIT[05,01] --> DIC: use ron_value\r
967 // BIT[04:03] --> AL: additive latency want "0" (0)\r
968 // BIT[00] --> DLL: want "Enable" (0)\r
969 //\r
970 // (BIT5|BIT1) set Ron value\r
971 // 00 --> RZQ/6 (40ohm)\r
972 // 01 --> RZQ/7 (34ohm)\r
973 // 1* --> RESERVED\r
974 //\r
975 // (BIT9|BIT6|BIT2) set Rtt_nom value\r
976 // 000 --> Disabled\r
977 // 001 --> RZQ/4 ( 60ohm)\r
978 // 010 --> RZQ/2 (120ohm)\r
979 // 011 --> RZQ/6 ( 40ohm)\r
980 // 1** --> RESERVED\r
981 emrs1Command.raw = 0;\r
982 emrs1Command.field.bankAddress = 1;\r
983 emrs1Command.field.dllEnabled = 0; // 0 = Enable , 1 = Disable\r
984\r
985 if (mrc_params->ron_value == 0)\r
986 {\r
987 emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_34;\r
988 }\r
989 else\r
990 {\r
991 emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_40;\r
992 }\r
993\r
994\r
995 if (mrc_params->rtt_nom_value == 0)\r
996 {\r
997 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_40 << 6);\r
998 }\r
999 else if (mrc_params->rtt_nom_value == 1)\r
1000 {\r
1001 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_60 << 6);\r
1002 }\r
1003 else if (mrc_params->rtt_nom_value == 2)\r
1004 {\r
1005 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_120 << 6);\r
1006 }\r
1007\r
1008 // save MRS1 value (excluding control fields)\r
1009 mrc_params->mrs1 = emrs1Command.raw >> 6;\r
1010\r
1011 // setup for mrs 0\r
1012 // BIT[15:13] --> Always "0"\r
1013 // BIT[12] --> PPD: for Quark (1)\r
1014 // BIT[11:09] --> WR: use oem_tWR\r
1015 // BIT[08] --> DLL: want "Reset" (1, self clearing)\r
1016 // BIT[07] --> MODE: want "Normal" (0)\r
1017 // BIT[06:04,02] --> CL: use oem_tCAS\r
1018 // BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)\r
1019 // BIT[01:00] --> BL: want "8 Fixed" (0)\r
1020 // WR:\r
1021 // 0 --> 16\r
1022 // 1 --> 5\r
1023 // 2 --> 6\r
1024 // 3 --> 7\r
1025 // 4 --> 8\r
1026 // 5 --> 10\r
1027 // 6 --> 12\r
1028 // 7 --> 14\r
1029 // CL:\r
1030 // BIT[02:02] "0" if oem_tCAS <= 11 (1866?)\r
1031 // BIT[06:04] use oem_tCAS-4\r
1032 mrs0Command.raw = 0;\r
1033 mrs0Command.field.bankAddress = 0;\r
1034 mrs0Command.field.dllReset = 1;\r
1035 mrs0Command.field.BL = 0;\r
1036 mrs0Command.field.PPD = 1;\r
1037 mrs0Command.field.casLatency = DTR0reg.field.tCL + 1;\r
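  // DTR0.tCL was programmed as (CAS latency - 5) in prog_ddr_timing_control(),\r
  // so tCL + 1 yields the (CAS latency - 4) value expected by MRS0 bits [6:4].\r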
1038\r
1039 TCK = tCK[mrc_params->ddr_speed];\r
1040 TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600\r
1041 mrs0Command.field.writeRecovery = TWR - 4;\r
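  // e.g. at 800 MT/s, TWR = MCEIL(15000, 2500) = 6 clocks, so writeRecovery = 2,\r
  // which the WR table above decodes back to 6.\r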
1042\r
1043 for (Rank = 0; Rank < NUM_RANKS; Rank++)\r
1044 {\r
1045 // Skip to next populated rank\r
1046 if ((mrc_params->rank_enables & (1 << Rank)) == 0)\r
1047 {\r
1048 continue;\r
1049 }\r
1050\r
1051 emrs2Command.field.rankSelect = Rank;\r
1052 dram_init_command(emrs2Command.raw);\r
1053\r
1054 emrs3Command.field.rankSelect = Rank;\r
1055 dram_init_command(emrs3Command.raw);\r
1056\r
1057 emrs1Command.field.rankSelect = Rank;\r
1058 dram_init_command(emrs1Command.raw);\r
1059\r
1060 mrs0Command.field.rankSelect = Rank;\r
1061 dram_init_command(mrs0Command.raw);\r
1062\r
1063 dram_init_command(DCMD_ZQCL(Rank));\r
1064 }\r
1065\r
1066 LEAVEFN();\r
1067 return;\r
1068}\r
1069\r
1070// rcvn_cal:\r
1071// POST_CODE[major] == 0x05\r
1072//\r
1073// This function will perform our RCVEN Calibration Algorithm.\r
1074// We will only use the 2xCLK domain timings to perform RCVEN Calibration.\r
1075// All byte lanes will be calibrated "simultaneously" per channel per rank.\r
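// Outline of the algorithm as implemented below:\r
//  1) reset the read FIFO pointers and program a known starting delay per byte lane,\r
//  2) sweep to find the rising edge of the DQS preamble,\r
//  3) add a quarter clock to sit in the center of the high pulse,\r
//  4) step back one full clock at a time until a "0" is sampled,\r
//  5) add a final quarter clock to center the receive enable in the preamble.\r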
1076static void rcvn_cal(\r
1077 MRCParams_t *mrc_params)\r
1078{\r
1079 uint8_t channel_i; // channel counter\r
1080 uint8_t rank_i; // rank counter\r
1081 uint8_t bl_i; // byte lane counter\r
1082 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1083\r
1084#ifdef R2R_SHARING\r
1085 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1086#ifndef BACKUP_RCVN\r
1087 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1088#endif // BACKUP_RCVN\r
1089#endif // R2R_SHARING\r
1090\r
1091#ifdef BACKUP_RCVN\r
1092#else\r
1093 uint32_t tempD; // temporary DWORD\r
1094 uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane\r
1095 RegDTR1 dtr1;\r
1096 RegDTR1 dtr1save;\r
1097#endif // BACKUP_RCVN\r
1098 ENTERFN();\r
1099\r
1100 // rcvn_cal starts\r
1101 post_code(0x05, 0x00);\r
1102\r
1103#ifndef BACKUP_RCVN\r
1104 // need separate burst to sample DQS preamble\r
1105 dtr1.raw = dtr1save.raw = isbR32m(MCU, DTR1);\r
1106 dtr1.field.tCCD = 1;\r
1107 isbW32m(MCU, DTR1, dtr1.raw);\r
1108#endif\r
1109\r
1110#ifdef R2R_SHARING\r
1111 // need to set "final_delay[][]" elements to "0"\r
1112 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1113#endif // R2R_SHARING\r
1114\r
1115 // loop through each enabled channel\r
1116 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1117 {\r
1118 if (mrc_params->channel_enables & (1 << channel_i))\r
1119 {\r
1120 // perform RCVEN Calibration on a per rank basis\r
1121 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1122 {\r
1123 if (mrc_params->rank_enables & (1 << rank_i))\r
1124 {\r
1125 // POST_CODE here indicates the current channel and rank being calibrated\r
1126 post_code(0x05, (0x10 + ((channel_i << 4) | rank_i)));\r
1127\r
1128#ifdef BACKUP_RCVN\r
1129 // set hard-coded timing values\r
1130 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1131 {\r
1132 set_rcvn(channel_i, rank_i, bl_i, ddr_rcvn[PLATFORM_ID]);\r
1133 }\r
1134#else\r
1135 // enable FIFORST\r
1136 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)\r
1137 {\r
1138 isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), 0,\r
1139 BIT8); // 0 is enabled\r
1140 } // bl_i loop\r
1141 // initialise the starting delay to 128 PI (tCAS +1 CLK)\r
1142 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1143 {\r
1144#ifdef SIM\r
1145 // Original value was late at the end of DQS sequence\r
1146 delay[bl_i] = 3 * FULL_CLK;\r
1147#else\r
1148 delay[bl_i] = (4 + 1) * FULL_CLK; // 1x CLK domain timing is tCAS-4\r
1149#endif\r
1150\r
1151 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1152 } // bl_i loop\r
1153\r
1154 // now find the rising edge\r
1155 find_rising_edge(mrc_params, delay, channel_i, rank_i, true);\r
1156 // Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse.\r
1157 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1158 {\r
1159 delay[bl_i] += QRTR_CLK;\r
1160 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1161 } // bl_i loop\r
1162 // Now decrement delay by 128 PI (1 CLK) until we sample a "0"\r
1163 do\r
1164 {\r
1165\r
1166 tempD = sample_dqs(mrc_params, channel_i, rank_i, true);\r
1167 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1168 {\r
1169 if (tempD & (1 << bl_i))\r
1170 {\r
1171 if (delay[bl_i] >= FULL_CLK)\r
1172 {\r
1173 delay[bl_i] -= FULL_CLK;\r
1174 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1175 }\r
1176 else\r
1177 {\r
1178 // not enough delay\r
1179 training_message(channel_i, rank_i, bl_i);\r
1180 post_code(0xEE, 0x50);\r
1181 }\r
1182 }\r
1183 } // bl_i loop\r
1184 } while (tempD & 0xFF);\r
1185\r
1186#ifdef R2R_SHARING\r
1187 // increment "num_ranks_enabled"\r
1188 num_ranks_enabled++;\r
1189 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.\r
1190 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1191 {\r
1192 delay[bl_i] += QRTR_CLK;\r
1193 // add "delay[]" values to "final_delay[][]" for rolling average\r
1194 final_delay[channel_i][bl_i] += delay[bl_i];\r
1195 // set timing based on rolling average values\r
1196 set_rcvn(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1197 } // bl_i loop\r
1198#else\r
1199 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.\r
1200 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1201 {\r
1202 delay[bl_i] += QRTR_CLK;\r
1203 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1204 } // bl_i loop\r
1205\r
1206#endif // R2R_SHARING\r
1207\r
1208 // disable FIFORST\r
1209 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)\r
1210 {\r
1211 isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), BIT8,\r
1212 BIT8); // 1 is disabled\r
1213 } // bl_i loop\r
1214\r
1215#endif // BACKUP_RCVN\r
1216\r
1217 } // if rank is enabled\r
1218 } // rank_i loop\r
1219 } // if channel is enabled\r
1220 } // channel_i loop\r
1221\r
1222#ifndef BACKUP_RCVN\r
1223 // restore original\r
1224 isbW32m(MCU, DTR1, dtr1save.raw);\r
1225#endif\r
1226\r
1227#ifdef MRC_SV\r
1228 if (mrc_params->tune_rcvn)\r
1229 {\r
1230 uint32_t rcven, val;\r
1231 uint32_t rdcmd2rcven;\r
1232\r
1233 /*\r
1234 Formulas for RDCMD2DATAVALID & DIFFAMP dynamic timings\r
1235\r
1236 1. Set after RCVEN training\r
1237\r
1238 //Tune RDCMD2DATAVALID\r
1239\r
1240 x80/x84[21:16]\r
1241 MAX OF 2 RANKS : round up (rdcmd2rcven (rcven 1x) + 2x x 2 + PI/128) + 5\r
1242\r
1243 //rdcmd2rcven x80/84[12:8]\r
1244 //rcven 2x x70[23:20] & [11:8]\r
1245\r
1246 //Tune DIFFAMP Timings\r
1247\r
1248 //diffampen launch x88[20:16] & [4:0] -- B01LATCTL1\r
1249 MIN OF 2 RANKS : round down (rcven 1x + 2x x 2 + PI/128) - 1\r
1250\r
1251 //diffampen length x8C/x90 [13:8] -- B0ONDURCTL B1ONDURCTL\r
1252 MAX OF 2 RANKS : roundup (rcven 1x + 2x x 2 + PI/128) + 5\r
1253\r
1254\r
1255 2. need to do a fiforst after settings these values\r
1256 */\r
1257\r
1258 DPF(D_INFO, "BEFORE\n");\r
1259 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));\r
1260 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));\r
1261 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));\r
1262\r
1263 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));\r
1264 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));\r
1265\r
1266 rcven = get_rcvn(0, 0, 0) / 128;\r
1267 rdcmd2rcven = (isbR32m(DDRPHY, B0LATCTL0) >> 8) & 0x1F;\r
1268 val = rdcmd2rcven + rcven + 6;\r
1269 isbM32m(DDRPHY, B0LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));\r
1270\r
1271 val = rdcmd2rcven + rcven - 1;\r
1272 isbM32m(DDRPHY, B01LATCTL1, val << 0, (BIT4|BIT3|BIT2|BIT1|BIT0));\r
1273\r
1274 val = rdcmd2rcven + rcven + 5;\r
1275 isbM32m(DDRPHY, B0ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));\r
1276\r
1277 rcven = get_rcvn(0, 0, 1) / 128;\r
1278 rdcmd2rcven = (isbR32m(DDRPHY, B1LATCTL0) >> 8) & 0x1F;\r
1279 val = rdcmd2rcven + rcven + 6;\r
1280 isbM32m(DDRPHY, B1LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));\r
1281\r
1282 val = rdcmd2rcven + rcven - 1;\r
1283 isbM32m(DDRPHY, B01LATCTL1, val << 16, (BIT20|BIT19|BIT18|BIT17|BIT16));\r
1284\r
1285 val = rdcmd2rcven + rcven + 5;\r
1286 isbM32m(DDRPHY, B1ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));\r
1287\r
1288 DPF(D_INFO, "AFTER\n");\r
1289 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));\r
1290 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));\r
1291 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));\r
1292\r
1293 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));\r
1294 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));\r
1295\r
1296 DPF(D_INFO, "\nPress a key\n");\r
1297 mgetc();\r
1298\r
1299 // fifo reset\r
1300 isbM32m(DDRPHY, B01PTRCTL1, 0, BIT8); // 0 is enabled\r
1301 delay_n(3);\r
1302 isbM32m(DDRPHY, B01PTRCTL1, BIT8, BIT8); // 1 is disabled\r
1303 }\r
1304#endif\r
1305\r
1306 LEAVEFN();\r
1307 return;\r
1308}\r
1309\r
1310// Check memory by executing write/read/verify of many data patterns\r
1311// at the specified address. Bits in the result indicate a failure\r
1312// on a specific byte lane.\r
1313static uint32_t check_bls_ex(\r
1314 MRCParams_t *mrc_params,\r
1315 uint32_t address)\r
1316{\r
1317 uint32_t result;\r
1318 uint8_t first_run = 0;\r
1319\r
1320 if (mrc_params->hte_setup)\r
1321 {\r
1322 mrc_params->hte_setup = 0;\r
1323\r
1324 first_run = 1;\r
1325 select_hte(mrc_params);\r
1326 }\r
1327\r
1328 result = WriteStressBitLanesHTE(mrc_params, address, first_run);\r
1329\r
1330 DPF(D_TRN, "check_bls_ex result is %x\n", result);\r
1331 return result;\r
1332}\r
1333\r
1334// Check memory by executing a simple write/read/verify at\r
1335// the specified address. Bits in the result indicate a failure\r
1336// on a specific byte lane.\r
1337static uint32_t check_rw_coarse(\r
1338 MRCParams_t *mrc_params,\r
1339 uint32_t address)\r
1340{\r
1341 uint32_t result = 0;\r
1342 uint8_t first_run = 0;\r
1343\r
1344 if (mrc_params->hte_setup)\r
1345 {\r
1346 mrc_params->hte_setup = 0;\r
1347\r
1348 first_run = 1;\r
1349 select_hte(mrc_params);\r
1350 }\r
1351\r
1352 result = BasicWriteReadHTE(mrc_params, address, first_run, WRITE_TRAIN);\r
1353\r
1354 DPF(D_TRN, "check_rw_coarse result is %x\n", result);\r
1355 return result;\r
1356}\r
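// Informational note: the training loops below keep adjusting delays and re-running\r
// these checks until no byte-lane failure bits remain set for the lanes under test.\r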
1357\r
1358// wr_level:\r
1359// POST_CODE[major] == 0x06\r
1360//\r
1361// This function will perform the Write Levelling algorithm (align WCLK and WDQS).\r
1362// This algorithm will act on each rank in each channel separately.\r
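// Illustrative flow (values taken from the code below): the starting WDQS is copied from\r
// WCLK, the rising edge is found, then the coarse stage adds FULL_CLK (128 PI) to WDQS and\r
// pulls every failing byte lane back by FULL_CLK until check_rw_coarse() reports no\r
// failures, which leaves WDQS on the correct clock edge.\r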
1363static void wr_level(\r
1364 MRCParams_t *mrc_params)\r
1365{\r
1366 uint8_t channel_i; // channel counter\r
1367 uint8_t rank_i; // rank counter\r
1368 uint8_t bl_i; // byte lane counter\r
1369 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1370\r
1371#ifdef R2R_SHARING\r
1372 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1373#ifndef BACKUP_WDQS\r
1374 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1375#endif // BACKUP_WDQS\r
1376#endif // R2R_SHARING\r
1377\r
1378#ifdef BACKUP_WDQS\r
1379#else\r
1380 bool all_edges_found; // determines stop condition for CRS_WR_LVL\r
1381 uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane\r
1382 // static would make the data be loaded into the heap once by shadow(), whereas\r
1383 // non-static copies the data onto the stack every time this function is called.\r
1384\r
1385 uint32_t address; // address to be checked during COARSE_WR_LVL\r
1386 RegDTR4 dtr4;\r
1387 RegDTR4 dtr4save;\r
1388#endif // BACKUP_WDQS\r
1389\r
1390 ENTERFN();\r
1391\r
1392 // wr_level starts\r
1393 post_code(0x06, 0x00);\r
1394\r
1395#ifdef R2R_SHARING\r
1396 // need to set "final_delay[][]" elements to "0"\r
1397 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1398#endif // R2R_SHARING\r
1399 // loop through each enabled channel\r
1400 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1401 {\r
1402 if (mrc_params->channel_enables & (1 << channel_i))\r
1403 {\r
1404 // perform WRITE LEVELING algorithm on a per rank basis\r
1405 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1406 {\r
1407 if (mrc_params->rank_enables & (1 << rank_i))\r
1408 {\r
1409 // POST_CODE here indicates the current rank and channel being calibrated\r
1410 post_code(0x06, (0x10 + ((channel_i << 4) | rank_i)));\r
1411\r
1412#ifdef BACKUP_WDQS\r
1413 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1414 {\r
1415 set_wdqs(channel_i, rank_i, bl_i, ddr_wdqs[PLATFORM_ID]);\r
1416 set_wdq(channel_i, rank_i, bl_i, (ddr_wdqs[PLATFORM_ID] - QRTR_CLK));\r
1417 }\r
1418#else\r
1419\r
1420 { // Begin product specific code\r
1421\r
1422 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1423 dram_init_command(DCMD_PREA(rank_i));\r
1424\r
1425 // enable Write Levelling Mode (EMRS1 w/ Write Levelling Mode Enable)\r
1426 dram_init_command(DCMD_MRS1(rank_i,0x0082));\r
1427\r
1428 // set ODT DRAM Full Time Termination disable in MCU\r
1429 dtr4.raw = dtr4save.raw = isbR32m(MCU, DTR4);\r
1430 dtr4.field.ODTDIS = 1;\r
1431 isbW32m(MCU, DTR4, dtr4.raw);\r
1432\r
1433 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1434 {\r
1435 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1436 (BIT28 | (0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1437 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Enable Sandy Bridge Mode (WDQ Tri-State) & Ensure 5 WDQS pulses during Write Leveling\r
1438 }\r
1439\r
1440 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (BIT16), (BIT16)); // Write Leveling Mode enabled in IO\r
1441 } // End product specific code\r
1442 // Initialise the starting delay to WCLK\r
1443 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1444 {\r
1445 { // Begin product specific code\r
1446 // CLK0 --> RK0\r
1447 // CLK1 --> RK1\r
1448 delay[bl_i] = get_wclk(channel_i, rank_i);\r
1449 } // End product specific code\r
1450 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1451 } // bl_i loop\r
1452 // now find the rising edge\r
1453 find_rising_edge(mrc_params, delay, channel_i, rank_i, false);\r
1454 { // Begin product specific code\r
1455 // disable Write Levelling Mode\r
1456 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (0), (BIT16)); // Write Leveling Mode disabled in IO\r
1457\r
1458 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1459 {\r
1460 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1461 ((0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1462 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation\r
1463 } // bl_i loop\r
1464\r
1465 // restore original DTR4\r
1466 isbW32m(MCU, DTR4, dtr4save.raw);\r
1467\r
1468 // restore original value (Write Levelling Mode Disable)\r
1469 dram_init_command(DCMD_MRS1(rank_i, mrc_params->mrs1));\r
1470\r
1471 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1472 dram_init_command(DCMD_PREA(rank_i));\r
1473 } // End product specific code\r
1474\r
1475 post_code(0x06, (0x30 + ((channel_i << 4) | rank_i)));\r
1476\r
1477 // COARSE WRITE LEVEL:\r
1478 // check that we're on the correct clock edge\r
1479\r
1480 // hte reconfiguration request\r
1481 mrc_params->hte_setup = 1;\r
1482\r
1483 // start CRS_WR_LVL with WDQS = WDQS + 128 PI\r
1484 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1485 {\r
1486 delay[bl_i] = get_wdqs(channel_i, rank_i, bl_i) + FULL_CLK;\r
1487 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1488 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1489 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1490 } // bl_i loop\r
1491\r
1492 // get an address in the targeted channel/rank\r
1493 address = get_addr(mrc_params, channel_i, rank_i);\r
1494 do\r
1495 {\r
1496 uint32_t coarse_result = 0x00;\r
1497 uint32_t coarse_result_mask = byte_lane_mask(mrc_params);\r
1498 all_edges_found = true; // assume pass\r
1499\r
1500#ifdef SIM\r
1501 // need to restore memory to the idle state as writes can be out of sync\r
1502 dram_init_command (DCMD_PREA(rank_i));\r
1503#endif\r
1504\r
1505 mrc_params->hte_setup = 1;\r
1506 coarse_result = check_rw_coarse(mrc_params, address);\r
1507\r
1508 // check for failures and margin the byte lane back 128 PI (1 CLK)\r
1509 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1510 {\r
1511 if (coarse_result & (coarse_result_mask << bl_i))\r
1512 {\r
1513 all_edges_found = false;\r
1514 delay[bl_i] -= FULL_CLK;\r
1515 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1516 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1517 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1518 }\r
1519 } // bl_i loop\r
1520\r
1521 } while (!all_edges_found);\r
1522\r
1523#ifdef R2R_SHARING\r
1524 // increment "num_ranks_enabled"\r
1525 num_ranks_enabled++;\r
1526 // accumulate "final_delay[][]" values from "delay[]" values for rolling average\r
1527 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1528 {\r
1529 final_delay[channel_i][bl_i] += delay[bl_i];\r
1530 set_wdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1531 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1532 set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled) - QRTR_CLK);\r
1533 } // bl_i loop\r
1534#endif // R2R_SHARING\r
1535#endif // BACKUP_WDQS\r
1536\r
1537 } // if rank is enabled\r
1538 } // rank_i loop\r
1539 } // if channel is enabled\r
1540 } // channel_i loop\r
1541\r
1542 LEAVEFN();\r
1543 return;\r
1544}\r
1545\r
1546// rd_train:\r
1547// POST_CODE[major] == 0x07\r
1548//\r
1549// This function will perform the READ TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.\r
1550// The idea here is to train the VREF and RDQS (and eventually RDQ) values to achieve maximum READ margins.\r
1551// The algorithm will first determine the X coordinate (RDQS setting).\r
1552// This is done by collapsing the VREF eye until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.\r
1553// Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX, then average those; this will be the final X coordinate.\r
1554// The algorithm will then determine the Y coordinate (VREF setting).\r
1555// This is done by collapsing the RDQS eye until we find a minimum required VREF eye for RDQS_MIN and RDQS_MAX.\r
1556// Then we take the averages of the VREF eye at RDQS_MIN and RDQS_MAX, then average those; this will be the final Y coordinate.\r
1557// NOTE: this algorithm assumes the eye curves have a one-to-one relationship, meaning for each X the curve has only one Y and vice versa.\r
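// Illustrative centering example (hypothetical passing values): if the passing RDQS window\r
// were [12, 44] at VREF_MAX and [16, 48] at VREF_MIN, the X centre would be\r
// ((12 + 44) / 2 + (16 + 48) / 2) / 2 = (28 + 32) / 2 = 30; the Y centre is computed the\r
// same way from the VREF windows found at RDQS_MIN and RDQS_MAX.\r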
1558static void rd_train(\r
1559 MRCParams_t *mrc_params)\r
1560{\r
1561\r
1562#define MIN_RDQS_EYE 10 // in PI Codes\r
1563#define MIN_VREF_EYE 10 // in VREF Codes\r
1564#define RDQS_STEP 1 // how many RDQS codes to jump while margining\r
1565#define VREF_STEP 1 // how many VREF codes to jump while margining\r
1566#define VREF_MIN (0x00) // offset into "vref_codes[]" for minimum allowed VREF setting\r
1567#define VREF_MAX (0x3F) // offset into "vref_codes[]" for maximum allowed VREF setting\r
1568#define RDQS_MIN (0x00) // minimum RDQS delay value\r
1569#define RDQS_MAX (0x3F) // maximum RDQS delay value\r
1570#define B 0 // BOTTOM VREF\r
1571#define T 1 // TOP VREF\r
1572#define L 0 // LEFT RDQS\r
1573#define R 1 // RIGHT RDQS\r
1574\r
1575 uint8_t channel_i; // channel counter\r
1576 uint8_t rank_i; // rank counter\r
1577 uint8_t bl_i; // byte lane counter\r
1578 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1579#ifdef BACKUP_RDQS\r
1580#else\r
1581 uint8_t side_x; // tracks LEFT/RIGHT approach vectors\r
1582 uint8_t side_y; // tracks BOTTOM/TOP approach vectors\r
1583 uint8_t x_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // X coordinate data (passing RDQS values) for approach vectors\r
1584 uint8_t y_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_BYTE_LANES]; // Y coordinate data (passing VREF values) for approach vectors\r
1585 uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // centered X (RDQS)\r
1586 uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES]; // centered Y (VREF)\r
1587 uint32_t address; // target address for "check_bls_ex()"\r
1588 uint32_t result; // result of "check_bls_ex()"\r
1589 uint32_t bl_mask; // byte lane mask for "result" checking\r
1590#ifdef R2R_SHARING\r
1591 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1592 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1593#endif // R2R_SHARING\r
1594#endif // BACKUP_RDQS\r
1595 // rd_train starts\r
1596 post_code(0x07, 0x00);\r
1597\r
1598 ENTERFN();\r
1599\r
1600#ifdef BACKUP_RDQS\r
1601 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1602 {\r
1603 if (mrc_params->channel_enables & (1<<channel_i))\r
1604 {\r
1605 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1606 {\r
1607 if (mrc_params->rank_enables & (1<<rank_i))\r
1608 {\r
1609 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1610 {\r
1611 set_rdqs(channel_i, rank_i, bl_i, ddr_rdqs[PLATFORM_ID]);\r
1612 } // bl_i loop\r
1613 } // if rank is enabled\r
1614 } // rank_i loop\r
1615 } // if channel is enabled\r
1616 } // channel_i loop\r
1617#else\r
1618 // initialise x/y_coordinate arrays\r
1619 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1620 {\r
1621 if (mrc_params->channel_enables & (1 << channel_i))\r
1622 {\r
1623 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1624 {\r
1625 if (mrc_params->rank_enables & (1 << rank_i))\r
1626 {\r
1627 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1628 {\r
1629 // x_coordinate:\r
1630 x_coordinate[L][B][channel_i][rank_i][bl_i] = RDQS_MIN;\r
1631 x_coordinate[R][B][channel_i][rank_i][bl_i] = RDQS_MAX;\r
1632 x_coordinate[L][T][channel_i][rank_i][bl_i] = RDQS_MIN;\r
1633 x_coordinate[R][T][channel_i][rank_i][bl_i] = RDQS_MAX;\r
1634 // y_coordinate:\r
1635 y_coordinate[L][B][channel_i][bl_i] = VREF_MIN;\r
1636 y_coordinate[R][B][channel_i][bl_i] = VREF_MIN;\r
1637 y_coordinate[L][T][channel_i][bl_i] = VREF_MAX;\r
1638 y_coordinate[R][T][channel_i][bl_i] = VREF_MAX;\r
1639 } // bl_i loop\r
1640 } // if rank is enabled\r
1641 } // rank_i loop\r
1642 } // if channel is enabled\r
1643 } // channel_i loop\r
1644\r
1645 // initialise other variables\r
1646 bl_mask = byte_lane_mask(mrc_params);\r
1647 address = get_addr(mrc_params, 0, 0);\r
1648\r
1649#ifdef R2R_SHARING\r
1650 // need to set "final_delay[][]" elements to "0"\r
1651 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1652#endif // R2R_SHARING\r
1653\r
1654 // look for passing coordinates\r
1655 for (side_y = B; side_y <= T; side_y++)\r
1656 {\r
1657 for (side_x = L; side_x <= R; side_x++)\r
1658 {\r
1659\r
1660 post_code(0x07, (0x10 + (side_y * 2) + (side_x)));\r
1661\r
1662 // find passing values\r
1663 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1664 {\r
1665 if (mrc_params->channel_enables & (0x1 << channel_i))\r
1666 {\r
1667 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1668 {\r
1669\r
1670 if (mrc_params->rank_enables & (0x1 << rank_i))\r
1671 {\r
1672 // set x/y_coordinate search starting settings\r
1673 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1674 {\r
1675 set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);\r
1676 set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);\r
1677 } // bl_i loop\r
1678 // get an address in the target channel/rank\r
1679 address = get_addr(mrc_params, channel_i, rank_i);\r
1680\r
1681 // request HTE reconfiguration\r
1682 mrc_params->hte_setup = 1;\r
1683\r
1684 // test the settings\r
1685 do\r
1686 {\r
1687\r
1688 // result[07:00] == failing byte lane (MAX 8)\r
1689 result = check_bls_ex( mrc_params, address);\r
1690\r
1691 // check for failures\r
1692 if (result & 0xFF)\r
1693 {\r
1694 // at least 1 byte lane failed\r
1695 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1696 {\r
1697 if (result & (bl_mask << bl_i))\r
1698 {\r
1699 // adjust the RDQS values accordingly\r
1700 if (side_x == L)\r
1701 {\r
1702 x_coordinate[L][side_y][channel_i][rank_i][bl_i] += RDQS_STEP;\r
1703 }\r
1704 else\r
1705 {\r
1706 x_coordinate[R][side_y][channel_i][rank_i][bl_i] -= RDQS_STEP;\r
1707 }\r
1708 // check that we haven't closed the RDQS_EYE too much\r
1709 if ((x_coordinate[L][side_y][channel_i][rank_i][bl_i] > (RDQS_MAX - MIN_RDQS_EYE)) ||\r
1710 (x_coordinate[R][side_y][channel_i][rank_i][bl_i] < (RDQS_MIN + MIN_RDQS_EYE))\r
1711 ||\r
1712 (x_coordinate[L][side_y][channel_i][rank_i][bl_i]\r
1713 == x_coordinate[R][side_y][channel_i][rank_i][bl_i]))\r
1714 {\r
1715 // not enough RDQS margin available at this VREF\r
1716 // update VREF values accordingly\r
1717 if (side_y == B)\r
1718 {\r
1719 y_coordinate[side_x][B][channel_i][bl_i] += VREF_STEP;\r
1720 }\r
1721 else\r
1722 {\r
1723 y_coordinate[side_x][T][channel_i][bl_i] -= VREF_STEP;\r
1724 }\r
1725 // check that we haven't closed the VREF_EYE too much\r
1726 if ((y_coordinate[side_x][B][channel_i][bl_i] > (VREF_MAX - MIN_VREF_EYE)) ||\r
1727 (y_coordinate[side_x][T][channel_i][bl_i] < (VREF_MIN + MIN_VREF_EYE)) ||\r
1728 (y_coordinate[side_x][B][channel_i][bl_i] == y_coordinate[side_x][T][channel_i][bl_i]))\r
1729 {\r
1730 // VREF_EYE collapsed below MIN_VREF_EYE\r
1731 training_message(channel_i, rank_i, bl_i);\r
1732 post_code(0xEE, (0x70 + (side_y * 2) + (side_x)));\r
1733 }\r
1734 else\r
1735 {\r
1736 // update the VREF setting\r
1737 set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);\r
1738 // reset the X coordinate to begin the search at the new VREF\r
1739 x_coordinate[side_x][side_y][channel_i][rank_i][bl_i] =\r
1740 (side_x == L) ? (RDQS_MIN) : (RDQS_MAX);\r
1741 }\r
1742 }\r
1743 // update the RDQS setting\r
1744 set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);\r
1745 } // if bl_i failed\r
1746 } // bl_i loop\r
1747 } // at least 1 byte lane failed\r
1748 } while (result & 0xFF);\r
1749 } // if rank is enabled\r
1750 } // rank_i loop\r
1751 } // if channel is enabled\r
1752 } // channel_i loop\r
1753 } // side_x loop\r
1754 } // side_y loop\r
1755\r
1756 post_code(0x07, 0x20);\r
1757\r
1758 // find final RDQS (X coordinate) & final VREF (Y coordinate)\r
1759 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1760 {\r
1761 if (mrc_params->channel_enables & (1 << channel_i))\r
1762 {\r
1763 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1764 {\r
1765 if (mrc_params->rank_enables & (1 << rank_i))\r
1766 {\r
1767 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1768 {\r
1769 uint32_t tempD1;\r
1770 uint32_t tempD2;\r
1771\r
1772 // x_coordinate:\r
1773 DPF(D_INFO, "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n", rank_i, bl_i,\r
1774 x_coordinate[L][T][channel_i][rank_i][bl_i],\r
1775 x_coordinate[R][T][channel_i][rank_i][bl_i],\r
1776 x_coordinate[L][B][channel_i][rank_i][bl_i],\r
1777 x_coordinate[R][B][channel_i][rank_i][bl_i]);\r
1778\r
1779 tempD1 = (x_coordinate[R][T][channel_i][rank_i][bl_i] + x_coordinate[L][T][channel_i][rank_i][bl_i]) / 2; // average the TOP side LEFT & RIGHT values\r
1780 tempD2 = (x_coordinate[R][B][channel_i][rank_i][bl_i] + x_coordinate[L][B][channel_i][rank_i][bl_i]) / 2; // average the BOTTOM side LEFT & RIGHT values\r
1781 x_center[channel_i][rank_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages\r
1782\r
1783 // y_coordinate:\r
1784 DPF(D_INFO, "VREF R/L eye lane%d : %d-%d %d-%d\n", bl_i,\r
1785 y_coordinate[R][B][channel_i][bl_i],\r
1786 y_coordinate[R][T][channel_i][bl_i],\r
1787 y_coordinate[L][B][channel_i][bl_i],\r
1788 y_coordinate[L][T][channel_i][bl_i]);\r
1789\r
1790 tempD1 = (y_coordinate[R][T][channel_i][bl_i] + y_coordinate[R][B][channel_i][bl_i]) / 2; // average the RIGHT side TOP & BOTTOM values\r
1791 tempD2 = (y_coordinate[L][T][channel_i][bl_i] + y_coordinate[L][B][channel_i][bl_i]) / 2; // average the LEFT side TOP & BOTTOM values\r
1792 y_center[channel_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages\r
1793 } // bl_i loop\r
1794 } // if rank is enabled\r
1795 } // rank_i loop\r
1796 } // if channel is enabled\r
1797 } // channel_i loop\r
1798\r
1799#ifdef RX_EYE_CHECK\r
1800 // perform an eye check\r
1801 for (side_y=B; side_y<=T; side_y++)\r
1802 {\r
1803 for (side_x=L; side_x<=R; side_x++)\r
1804 {\r
1805\r
1806 post_code(0x07, (0x30 + (side_y * 2) + (side_x)));\r
1807\r
1808 // update the settings for the eye check\r
1809 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1810 {\r
1811 if (mrc_params->channel_enables & (1<<channel_i))\r
1812 {\r
1813 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1814 {\r
1815 if (mrc_params->rank_enables & (1<<rank_i))\r
1816 {\r
1817 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1818 {\r
1819 if (side_x == L)\r
1820 {\r
1821 set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] - (MIN_RDQS_EYE / 2)));\r
1822 }\r
1823 else\r
1824 {\r
1825 set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] + (MIN_RDQS_EYE / 2)));\r
1826 }\r
1827 if (side_y == B)\r
1828 {\r
1829 set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] - (MIN_VREF_EYE / 2)));\r
1830 }\r
1831 else\r
1832 {\r
1833 set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] + (MIN_VREF_EYE / 2)));\r
1834 }\r
1835 } // bl_i loop\r
1836 } // if rank is enabled\r
1837 } // rank_i loop\r
1838 } // if channel is enabled\r
1839 } // channel_i loop\r
1840\r
1841 // request HTE reconfiguration\r
1842 mrc_params->hte_setup = 1;\r
1843\r
1844 // check the eye\r
1845 if (check_bls_ex( mrc_params, address) & 0xFF)\r
1846 {\r
1847 // one or more byte lanes failed\r
1848 post_code(0xEE, (0x74 + (side_x * 2) + (side_y)));\r
1849 }\r
1850 } // side_x loop\r
1851 } // side_y loop\r
1852#endif // RX_EYE_CHECK\r
1853\r
1854 post_code(0x07, 0x40);\r
1855\r
1856 // set final placements\r
1857 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1858 {\r
1859 if (mrc_params->channel_enables & (1 << channel_i))\r
1860 {\r
1861 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1862 {\r
1863 if (mrc_params->rank_enables & (1 << rank_i))\r
1864 {\r
1865#ifdef R2R_SHARING\r
1866 // increment "num_ranks_enabled"\r
1867 num_ranks_enabled++;\r
1868#endif // R2R_SHARING\r
1869 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1870 {\r
1871 // x_coordinate:\r
1872#ifdef R2R_SHARING\r
1873 final_delay[channel_i][bl_i] += x_center[channel_i][rank_i][bl_i];\r
1874 set_rdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1875#else\r
1876 set_rdqs(channel_i, rank_i, bl_i, x_center[channel_i][rank_i][bl_i]);\r
1877#endif // R2R_SHARING\r
1878 // y_coordinate:\r
1879 set_vref(channel_i, bl_i, y_center[channel_i][bl_i]);\r
1880 } // bl_i loop\r
1881 } // if rank is enabled\r
1882 } // rank_i loop\r
1883 } // if channel is enabled\r
1884 } // channel_i loop\r
1885#endif // BACKUP_RDQS\r
1886 LEAVEFN();\r
1887 return;\r
1888}\r
1889\r
1890// wr_train:\r
1891// POST_CODE[major] == 0x08\r
1892//\r
1893// This function will perform the WRITE TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.\r
1894// The idea here is to train the WDQ timings to achieve maximum WRITE margins.\r
1895// The algorithm will start with WDQ at the current WDQ setting (tracks WDQS in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data patterns pass.\r
1896// This is because WDQS will be aligned to WCLK by the Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window of validity.\r
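// Illustrative example (hypothetical values): with WDQS trained to 160 PI, WDQ starts at\r
// (160 - 32) - 32 = 96 on the LEFT side and (160 - 32) + 32 = 160 on the RIGHT side; each\r
// failing byte lane moves its edge inward by WDQ_STEP until both sides pass, and the final\r
// WDQ is the midpoint of the passing window, e.g. (104 + 150) / 2 = 127.\r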
1897static void wr_train(\r
1898 MRCParams_t *mrc_params)\r
1899{\r
1900\r
1901#define WDQ_STEP 1 // how many WDQ codes to jump while margining\r
1902#define L 0 // LEFT side loop value definition\r
1903#define R 1 // RIGHT side loop value definition\r
1904\r
1905 uint8_t channel_i; // channel counter\r
1906 uint8_t rank_i; // rank counter\r
1907 uint8_t bl_i; // byte lane counter\r
1908 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1909#ifdef BACKUP_WDQ\r
1910#else\r
1911 uint8_t side_i; // LEFT/RIGHT side indicator (0=L, 1=R)\r
1912 uint32_t tempD; // temporary DWORD\r
1913 uint32_t delay[2/*side_i*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // 2 arrays, for L & R side passing delays\r
1914 uint32_t address; // target address for "check_bls_ex()"\r
1915 uint32_t result; // result of "check_bls_ex()"\r
1916 uint32_t bl_mask; // byte lane mask for "result" checking\r
1917#ifdef R2R_SHARING\r
1918 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1919 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1920#endif // R2R_SHARING\r
1921#endif // BACKUP_WDQ\r
1922\r
1923 // wr_train starts\r
1924 post_code(0x08, 0x00);\r
1925\r
1926 ENTERFN();\r
1927\r
1928#ifdef BACKUP_WDQ\r
1929 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1930 {\r
1931 if (mrc_params->channel_enables & (1<<channel_i))\r
1932 {\r
1933 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1934 {\r
1935 if (mrc_params->rank_enables & (1<<rank_i))\r
1936 {\r
1937 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1938 {\r
1939 set_wdq(channel_i, rank_i, bl_i, ddr_wdq[PLATFORM_ID]);\r
1940 } // bl_i loop\r
1941 } // if rank is enabled\r
1942 } // rank_i loop\r
1943 } // if channel is enabled\r
1944 } // channel_i loop\r
1945#else\r
1946 // initialise "delay"\r
1947 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1948 {\r
1949 if (mrc_params->channel_enables & (1 << channel_i))\r
1950 {\r
1951 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1952 {\r
1953 if (mrc_params->rank_enables & (1 << rank_i))\r
1954 {\r
1955 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1956 {\r
1957 // want to start with WDQ = (WDQS - QRTR_CLK) +/- QRTR_CLK\r
1958 tempD = get_wdqs(channel_i, rank_i, bl_i) - QRTR_CLK;\r
1959 delay[L][channel_i][rank_i][bl_i] = tempD - QRTR_CLK;\r
1960 delay[R][channel_i][rank_i][bl_i] = tempD + QRTR_CLK;\r
1961 } // bl_i loop\r
1962 } // if rank is enabled\r
1963 } // rank_i loop\r
1964 } // if channel is enabled\r
1965 } // channel_i loop\r
1966\r
1967 // initialise other variables\r
1968 bl_mask = byte_lane_mask(mrc_params);\r
1969 address = get_addr(mrc_params, 0, 0);\r
1970\r
1971#ifdef R2R_SHARING\r
1972 // need to set "final_delay[][]" elements to "0"\r
1973 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1974#endif // R2R_SHARING\r
1975\r
1976 // start algorithm on the LEFT side and train each channel/bl until no failures are observed, then repeat for the RIGHT side.\r
1977 for (side_i = L; side_i <= R; side_i++)\r
1978 {\r
1979 post_code(0x08, (0x10 + (side_i)));\r
1980\r
1981 // set starting values\r
1982 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1983 {\r
1984 if (mrc_params->channel_enables & (1 << channel_i))\r
1985 {\r
1986 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1987 {\r
1988 if (mrc_params->rank_enables & (1 << rank_i))\r
1989 {\r
1990 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1991 {\r
1992 set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);\r
1993 } // bl_i loop\r
1994 } // if rank is enabled\r
1995 } // rank_i loop\r
1996 } // if channel is enabled\r
1997 } // channel_i loop\r
1998\r
1999 // find passing values\r
2000 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2001 {\r
2002 if (mrc_params->channel_enables & (0x1 << channel_i))\r
2003 {\r
2004 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2005 {\r
2006 if (mrc_params->rank_enables & (0x1 << rank_i))\r
2007 {\r
2008 // get an address in the target channel/rank\r
2009 address = get_addr(mrc_params, channel_i, rank_i);\r
2010\r
2011 // request HTE reconfiguration\r
2012 mrc_params->hte_setup = 1;\r
2013\r
2014 // check the settings\r
2015 do\r
2016 {\r
2017\r
2018#ifdef SIM\r
2019 // need to restore memory to the idle state as writes can be out of sync\r
2020 dram_init_command (DCMD_PREA(rank_i));\r
2021#endif\r
2022\r
2023 // result[07:00] == failing byte lane (MAX 8)\r
2024 result = check_bls_ex( mrc_params, address);\r
2025 // check for failures\r
2026 if (result & 0xFF)\r
2027 {\r
2028 // at least 1 byte lane failed\r
2029 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
2030 {\r
2031 if (result & (bl_mask << bl_i))\r
2032 {\r
2033 if (side_i == L)\r
2034 {\r
2035 delay[L][channel_i][rank_i][bl_i] += WDQ_STEP;\r
2036 }\r
2037 else\r
2038 {\r
2039 delay[R][channel_i][rank_i][bl_i] -= WDQ_STEP;\r
2040 }\r
2041 // check for algorithm failure\r
2042 if (delay[L][channel_i][rank_i][bl_i] != delay[R][channel_i][rank_i][bl_i])\r
2043 {\r
2044 // margin available, update delay setting\r
2045 set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);\r
2046 }\r
2047 else\r
2048 {\r
2049 // no margin available, notify the user and halt\r
2050 training_message(channel_i, rank_i, bl_i);\r
2051 post_code(0xEE, (0x80 + side_i));\r
2052 }\r
2053 } // if bl_i failed\r
2054 } // bl_i loop\r
2055 } // at least 1 byte lane failed\r
2056 } while (result & 0xFF); // stop when all byte lanes pass\r
2057 } // if rank is enabled\r
2058 } // rank_i loop\r
2059 } // if channel is enabled\r
2060 } // channel_i loop\r
2061 } // side_i loop\r
2062\r
2063 // program WDQ to the middle of passing window\r
2064 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2065 {\r
2066 if (mrc_params->channel_enables & (1 << channel_i))\r
2067 {\r
2068 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2069 {\r
2070 if (mrc_params->rank_enables & (1 << rank_i))\r
2071 {\r
2072#ifdef R2R_SHARING\r
2073 // increment "num_ranks_enabled"\r
2074 num_ranks_enabled++;\r
2075#endif // R2R_SHARING\r
2076 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
2077 {\r
2078\r
2079 DPF(D_INFO, "WDQ eye rank%d lane%d : %d-%d\n", rank_i, bl_i,\r
2080 delay[L][channel_i][rank_i][bl_i],\r
2081 delay[R][channel_i][rank_i][bl_i]);\r
2082\r
2083 tempD = (delay[R][channel_i][rank_i][bl_i] + delay[L][channel_i][rank_i][bl_i]) / 2;\r
2084\r
2085#ifdef R2R_SHARING\r
2086 final_delay[channel_i][bl_i] += tempD;\r
2087 set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
2088#else\r
2089 set_wdq(channel_i, rank_i, bl_i, tempD);\r
2090#endif // R2R_SHARING\r
2091\r
2092 } // bl_i loop\r
2093 } // if rank is enabled\r
2094 } // rank_i loop\r
2095 } // if channel is enabled\r
2096 } // channel_i loop\r
2097#endif // BACKUP_WDQ\r
2098 LEAVEFN();\r
2099 return;\r
2100}\r
2101\r
2102// Wrapper for the JEDEC initialisation routine\r
2103static void perform_jedec_init(\r
2104 MRCParams_t *mrc_params)\r
2105{\r
2106 jedec_init(mrc_params, 0);\r
2107}\r
2108\r
2109// Configure DDRPHY for Auto-Refresh, Periodic Compensations,\r
2110// Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down\r
2111static void set_auto_refresh(\r
2112 MRCParams_t *mrc_params)\r
2113{\r
2114 uint32_t channel_i;\r
2115 uint32_t rank_i;\r
2116 uint32_t bl_i;\r
2117 uint32_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1;\r
2118 uint32_t tempD;\r
2119\r
2120 ENTERFN();\r
2121\r
2122 // enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down\r
2123 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2124 {\r
2125 if (mrc_params->channel_enables & (1 << channel_i))\r
2126 {\r
2127 // Enable Periodic RCOMPS\r
2128 isbM32m(DDRPHY, CMPCTRL, (BIT1), (BIT1));\r
2129\r
2130\r
2131 // Enable Dynamic DiffAmp & Set Read ODT Value\r
2132 switch (mrc_params->rd_odt_value)\r
2133 {\r
2134 case 0: tempD = 0x3F; break; // OFF\r
2135 default: tempD = 0x00; break; // Auto\r
2136 } // rd_odt_value switch\r
2137\r
2138 for (bl_i=0; bl_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_i++)\r
2139 {\r
2140 isbM32m(DDRPHY, (B0OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),\r
2141 ((0x00<<16)|(tempD<<10)),\r
2142 ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
2143\r
2144 isbM32m(DDRPHY, (B1OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),\r
2145 ((0x00<<16)|(tempD<<10)),\r
2146 ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10)));// Override: DIFFAMP, ODT\r
2147 } // bl_i loop\r
2148\r
2149 // Issue ZQCS command\r
2150 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2151 {\r
2152 if (mrc_params->rank_enables & (1 << rank_i))\r
2153 {\r
2154 dram_init_command(DCMD_ZQCS(rank_i));\r
2155 } // if rank_i enabled\r
2156 } // rank_i loop\r
2157\r
2158 } // if channel_i enabled\r
2159 } // channel_i loop\r
2160\r
2161 clear_pointers();\r
2162\r
2163 LEAVEFN();\r
2164 return;\r
2165}\r
2166\r
2167// Depending on the configuration, enables ECC support.\r
2168// Available memory size is decreased, and memory is updated with 0s\r
2169// in order to clear the error status. Address mode 2 is forced.\r
2170static void ecc_enable(\r
2171 MRCParams_t *mrc_params)\r
2172{\r
2173 RegDRP Drp;\r
2174 RegDSCH Dsch;\r
2175 RegDECCCTRL Ctr;\r
2176\r
2177 if (mrc_params->ecc_enables == 0) return;\r
2178\r
2179 ENTERFN();\r
2180\r
2181 // Configuration required in ECC mode\r
2182 Drp.raw = isbR32m(MCU, DRP);\r
2183 Drp.field.addressMap = 2;\r
2184 Drp.field.split64 = 1;\r
2185 isbW32m(MCU, DRP, Drp.raw);\r
2186\r
2187 // Disable new request bypass\r
2188 Dsch.raw = isbR32m(MCU, DSCH);\r
2189 Dsch.field.NEWBYPDIS = 1;\r
2190 isbW32m(MCU, DSCH, Dsch.raw);\r
2191\r
2192 // Enable ECC\r
2193 Ctr.raw = 0;\r
2194 Ctr.field.SBEEN = 1;\r
2195 Ctr.field.DBEEN = 1;\r
2196 Ctr.field.ENCBGEN = 1;\r
2197 isbW32m(MCU, DECCCTRL, Ctr.raw);\r
2198\r
2199#ifdef SIM\r
2200 // Read back to be sure writing took place\r
2201 Ctr.raw = isbR32m(MCU, DECCCTRL);\r
2202#endif\r
2203\r
2204 // Assume 8 bank memory, one bank is gone for ECC\r
2205 mrc_params->mem_size -= mrc_params->mem_size / 8;\r
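  // e.g. (illustrative) a 1024 MiB configuration would report 896 MiB after this adjustment\r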
2206\r
2207 // For S3 resume memory content has to be preserved\r
2208 if (mrc_params->boot_mode != bmS3)\r
2209 {\r
2210 select_hte(mrc_params);\r
2211 HteMemInit(mrc_params, MrcMemInit, MrcHaltHteEngineOnError);\r
2212 select_memory_manager(mrc_params);\r
2213 }\r
2214\r
2215 LEAVEFN();\r
2216 return;\r
2217}\r
2218\r
2219// Lock MCU registers at the end of initialisation sequence.\r
2220static void lock_registers(\r
2221 MRCParams_t *mrc_params)\r
2222{\r
2223 RegDCO Dco;\r
2224\r
2225 ENTERFN();\r
2226\r
2227 Dco.raw = isbR32m(MCU, DCO);\r
2228 Dco.field.PMIDIS = 0; //0 - PRI enabled\r
2229 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
2230 Dco.field.DRPLOCK = 1;\r
2231 Dco.field.REUTLOCK = 1;\r
2232 isbW32m(MCU, DCO, Dco.raw);\r
2233\r
2234 LEAVEFN();\r
2235\r
2236}\r
2237\r
2238#ifdef MRC_SV\r
2239\r
2240// cache write back invalidate\r
2241static void asm_wbinvd(void)\r
2242{\r
2243#if defined (SIM) || defined (GCC)\r
2244 asm(\r
2245 "wbinvd;"\r
2246 );\r
2247#else\r
2248 __asm wbinvd;\r
2249#endif\r
2250}\r
2251\r
2252// cache invalidate\r
2253static void asm_invd(void)\r
2254{\r
2255#if defined (SIM) || defined (GCC)\r
2256 asm(\r
2257 "invd;"\r
2258 );\r
2259#else\r
2260 __asm invd;\r
2261#endif\r
2262}\r
2263\r
2264\r
2265static void cpu_read(void)\r
2266{\r
2267 uint32_t adr, dat, limit;\r
2268\r
2269 asm_invd();\r
2270\r
2271 limit = 8 * 1024;\r
2272 for (adr = 0; adr < limit; adr += 4)\r
2273 {\r
2274 dat = *(uint32_t*) adr;\r
2275 if ((adr & 0x0F) == 0)\r
2276 {\r
2277 DPF(D_INFO, "\n%x : ", adr);\r
2278 }\r
2279 DPF(D_INFO, "%x ", dat);\r
2280 }\r
2281 DPF(D_INFO, "\n");\r
2282\r
2283 DPF(D_INFO, "CPU read done\n");\r
2284}\r
2285\r
2286\r
2287static void cpu_write(void)\r
2288{\r
2289 uint32_t adr, limit;\r
2290\r
2291 limit = 8 * 1024;\r
2292 for (adr = 0; adr < limit; adr += 4)\r
2293 {\r
2294 *(uint32_t*) adr = 0xDEAD0000 + adr;\r
2295 }\r
2296\r
2297 asm_wbinvd();\r
2298\r
2299 DPF(D_INFO, "CPU write done\n");\r
2300}\r
2301\r
2302\r
2303static void cpu_memory_test(\r
2304 MRCParams_t *mrc_params)\r
2305{\r
2306 uint32_t result = 0;\r
2307 uint32_t val, dat, adr, adr0, step, limit;\r
2308 uint64_t my_tsc;\r
2309\r
2310 ENTERFN();\r
2311\r
2312 asm_invd();\r
2313\r
2314 adr0 = 1 * 1024 * 1024;\r
2315 limit = 256 * 1024 * 1024;\r
2316\r
2317 for (step = 0; step <= 4; step++)\r
2318 {\r
2319 DPF(D_INFO, "Mem test step %d starting from %xh\n", step, adr0);\r
2320\r
2321 my_tsc = read_tsc();\r
2322 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2323 {\r
2324 if (step == 0) dat = adr;\r
2325 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2326 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2327 else if (step == 3) dat = 0x5555AAAA;\r
2328 else if (step == 4) dat = 0xAAAA5555;\r
2329\r
2330 *(uint32_t*) adr = dat;\r
2331 }\r
2332 DPF(D_INFO, "Write time %llXh\n", read_tsc() - my_tsc);\r
2333\r
2334 my_tsc = read_tsc();\r
2335 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2336 {\r
2337 if (step == 0) dat = adr;\r
2338 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2339 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2340 else if (step == 3) dat = 0x5555AAAA;\r
2341 else if (step == 4) dat = 0xAAAA5555;\r
2342\r
2343 val = *(uint32_t*) adr;\r
2344\r
2345 if (val != dat)\r
2346 {\r
2347 DPF(D_INFO, "%x vs. %x@%x\n", dat, val, adr);\r
2348 result = adr|BIT31;\r
2349 }\r
2350 }\r
2351 DPF(D_INFO, "Read time %llXh\n", read_tsc() - my_tsc);\r
2352 }\r
2353\r
2354 DPF( D_INFO, "Memory test result %x\n", result);\r
2355 LEAVEFN();\r
2356}\r
2357#endif // MRC_SV\r
2358\r
2359\r
2360// Execute the memory test; if an error is detected it is\r
2361// indicated in mrc_params->status.\r
2362static void memory_test(\r
2363 MRCParams_t *mrc_params)\r
2364{\r
2365 uint32_t result = 0;\r
2366\r
2367 ENTERFN();\r
2368\r
2369 select_hte(mrc_params);\r
2370 result = HteMemInit(mrc_params, MrcMemTest, MrcHaltHteEngineOnError);\r
2371 select_memory_manager(mrc_params);\r
2372\r
2373 DPF(D_INFO, "Memory test result %x\n", result);\r
2374 mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);\r
2375 LEAVEFN();\r
2376}\r
2377\r
2378\r
2379// Force same timings as with backup settings\r
2380static void static_timings(\r
2381 MRCParams_t *mrc_params)\r
2382\r
2383{\r
2384 uint8_t ch, rk, bl;\r
2385\r
2386 for (ch = 0; ch < NUM_CHANNELS; ch++)\r
2387 {\r
2388 for (rk = 0; rk < NUM_RANKS; rk++)\r
2389 {\r
2390 for (bl = 0; bl < NUM_BYTE_LANES; bl++)\r
2391 {\r
2392 set_rcvn(ch, rk, bl, 498); // RCVN\r
2393 set_rdqs(ch, rk, bl, 24); // RDQS\r
2394 set_wdqs(ch, rk, bl, 292); // WDQS\r
2395 set_wdq( ch, rk, bl, 260); // WDQ\r
2396 if (rk == 0)\r
2397 {\r
2398 set_vref(ch, bl, 32); // VREF (RANK0 only)\r
2399 }\r
2400 }\r
2401 set_wctl(ch, rk, 217); // WCTL\r
2402 }\r
2403 set_wcmd(ch, 220); // WCMD\r
2404 }\r
2405\r
2406 return;\r
2407}\r
2408\r
2409//\r
2410// Initialise system memory.\r
2411//\r
2412void MemInit(\r
2413 MRCParams_t *mrc_params)\r
2414{\r
2415 static const MemInit_t init[] =\r
2416 {\r
2417 { 0x0101, bmCold|bmFast|bmWarm|bmS3, clear_self_refresh }, //0\r
2418 { 0x0200, bmCold|bmFast|bmWarm|bmS3, prog_ddr_timing_control }, //1 initialise the MCU\r
2419 { 0x0103, bmCold|bmFast , prog_decode_before_jedec }, //2\r
2420 { 0x0104, bmCold|bmFast , perform_ddr_reset }, //3\r
2421 { 0x0300, bmCold|bmFast |bmS3, ddrphy_init }, //4 initialise the DDRPHY\r
2422 { 0x0400, bmCold|bmFast , perform_jedec_init }, //5 perform JEDEC initialisation of DRAMs\r
2423 { 0x0105, bmCold|bmFast , set_ddr_init_complete }, //6\r
2424 { 0x0106, bmFast|bmWarm|bmS3, restore_timings }, //7\r
2425 { 0x0106, bmCold , default_timings }, //8\r
2426 { 0x0500, bmCold , rcvn_cal }, //9 perform RCVN_CAL algorithm\r
2427 { 0x0600, bmCold , wr_level }, //10 perform WR_LEVEL algorithm\r
2428 { 0x0120, bmCold , prog_page_ctrl }, //11\r
2429 { 0x0700, bmCold , rd_train }, //12 perform RD_TRAIN algorithm\r
2430 { 0x0800, bmCold , wr_train }, //13 perform WR_TRAIN algorithm\r
2431 { 0x010B, bmCold , store_timings }, //14\r
2432 { 0x010C, bmCold|bmFast|bmWarm|bmS3, enable_scrambling }, //15\r
2433 { 0x010D, bmCold|bmFast|bmWarm|bmS3, prog_ddr_control }, //16\r
2434 { 0x010E, bmCold|bmFast|bmWarm|bmS3, prog_dra_drb }, //17\r
2435 { 0x010F, bmWarm|bmS3, perform_wake }, //18\r
2436 { 0x0110, bmCold|bmFast|bmWarm|bmS3, change_refresh_period }, //19\r
2437 { 0x0111, bmCold|bmFast|bmWarm|bmS3, set_auto_refresh }, //20\r
2438 { 0x0112, bmCold|bmFast|bmWarm|bmS3, ecc_enable }, //21\r
2439 { 0x0113, bmCold|bmFast , memory_test }, //22\r
2440 { 0x0114, bmCold|bmFast|bmWarm|bmS3, lock_registers } //23 set init done\r
2441 };\r
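  // Note: each entry runs only when the current boot mode is present in its boot_path mask\r
  // (see the check below); on a warm boot, for example, the training steps (rcvn_cal,\r
  // wr_level, rd_train, wr_train) are skipped and restore_timings re-applies the values\r
  // saved by store_timings on the preceding cold boot.\r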
2442\r
2443 uint32_t i;\r
2444\r
2445 ENTERFN();\r
2446\r
2447 DPF(D_INFO, "Meminit build %s %s\n", __DATE__, __TIME__);\r
2448\r
2449 // MRC started\r
2450 post_code(0x01, 0x00);\r
2451\r
2452 if (mrc_params->boot_mode != bmCold)\r
2453 {\r
2454 if (mrc_params->ddr_speed != mrc_params->timings.ddr_speed)\r
2455 {\r
2456 // full training required as frequency changed\r
2457 mrc_params->boot_mode = bmCold;\r
2458 }\r
2459 }\r
2460\r
2461 for (i = 0; i < MCOUNT(init); i++)\r
2462 {\r
2463 uint64_t my_tsc;\r
2464\r
2465#ifdef MRC_SV\r
2466 if (mrc_params->menu_after_mrc && i > 14)\r
2467 {\r
2468 uint8_t ch;\r
2469\r
2470 mylop:\r
2471\r
2472 DPF(D_INFO, "-- c - continue --\n");\r
2473 DPF(D_INFO, "-- j - move to jedec init --\n");\r
2474 DPF(D_INFO, "-- m - memory test --\n");\r
2475 DPF(D_INFO, "-- r - cpu read --\n");\r
2476 DPF(D_INFO, "-- w - cpu write --\n");\r
2477 DPF(D_INFO, "-- b - hte base test --\n");\r
2478 DPF(D_INFO, "-- g - hte extended test --\n");\r
2479\r
2480 ch = mgetc();\r
2481 switch (ch)\r
2482 {\r
2483 case 'c':\r
2484 break;\r
2485 case 'j': //move to jedec init\r
2486 i = 5;\r
2487 break;\r
2488\r
2489 case 'M':\r
2490 case 'N':\r
2491 {\r
2492 uint32_t n, res, cnt=0;\r
2493\r
2494 for(n=0; mgetch()==0; n++)\r
2495 {\r
2496 if( ch == 'M' || n % 256 == 0)\r
2497 {\r
2498 DPF(D_INFO, "n=%d e=%d\n", n, cnt);\r
2499 }\r
2500\r
2501 res = 0;\r
2502\r
2503 if( ch == 'M')\r
2504 {\r
2505 memory_test(mrc_params);\r
2506 res |= mrc_params->status;\r
2507 }\r
2508\r
2509 mrc_params->hte_setup = 1;\r
2510 res |= check_bls_ex(mrc_params, 0x00000000);\r
2511 res |= check_bls_ex(mrc_params, 0x00000000);\r
2512 res |= check_bls_ex(mrc_params, 0x00000000);\r
2513 res |= check_bls_ex(mrc_params, 0x00000000);\r
2514\r
2515 if( mrc_params->rank_enables & 2)\r
2516 {\r
2517 mrc_params->hte_setup = 1;\r
2518 res |= check_bls_ex(mrc_params, 0x40000000);\r
2519 res |= check_bls_ex(mrc_params, 0x40000000);\r
2520 res |= check_bls_ex(mrc_params, 0x40000000);\r
2521 res |= check_bls_ex(mrc_params, 0x40000000);\r
2522 }\r
2523\r
2524 if( res != 0)\r
2525 {\r
2526 DPF(D_INFO, "###########\n");\r
2527 DPF(D_INFO, "#\n");\r
2528 DPF(D_INFO, "# Error count %d\n", ++cnt);\r
2529 DPF(D_INFO, "#\n");\r
2530 DPF(D_INFO, "###########\n");\r
2531 }\r
2532\r
2533 } // for\r
2534\r
2535 select_memory_manager(mrc_params);\r
2536 }\r
2537 goto mylop;\r
2538 case 'm':\r
2539 memory_test(mrc_params);\r
2540 goto mylop;\r
2541 case 'n':\r
2542 cpu_memory_test(mrc_params);\r
2543 goto mylop;\r
2544\r
2545 case 'l':\r
2546 ch = mgetc();\r
2547 if (ch <= '9') DpfPrintMask ^= (ch - '0') << 3;\r
2548 DPF(D_INFO, "Log mask %x\n", DpfPrintMask);\r
2549 goto mylop;\r
2550 case 'p':\r
2551 print_timings(mrc_params);\r
2552 goto mylop;\r
2553 case 'R':\r
2554 rd_train(mrc_params);\r
2555 goto mylop;\r
2556 case 'W':\r
2557 wr_train(mrc_params);\r
2558 goto mylop;\r
2559\r
2560 case 'r':\r
2561 cpu_read();\r
2562 goto mylop;\r
2563 case 'w':\r
2564 cpu_write();\r
2565 goto mylop;\r
2566\r
2567 case 'g':\r
2568 {\r
2569 uint32_t result;\r
2570 select_hte(mrc_params);\r
2571 mrc_params->hte_setup = 1;\r
2572 result = check_bls_ex(mrc_params, 0);\r
2573 DPF(D_INFO, "Extended test result %x\n", result);\r
2574 select_memory_manager(mrc_params);\r
2575 }\r
2576 goto mylop;\r
2577 case 'b':\r
2578 {\r
2579 uint32_t result;\r
2580 select_hte(mrc_params);\r
2581 mrc_params->hte_setup = 1;\r
2582 result = check_rw_coarse(mrc_params, 0);\r
2583 DPF(D_INFO, "Base test result %x\n", result);\r
2584 select_memory_manager(mrc_params);\r
2585 }\r
2586 goto mylop;\r
2587 case 'B':\r
2588 select_hte(mrc_params);\r
2589 HteMemOp(0x2340, 1, 1);\r
2590 select_memory_manager(mrc_params);\r
2591 goto mylop;\r
2592\r
2593 case '3':\r
2594 {\r
2595 RegDPMC0 DPMC0reg;\r
2596\r
2597 DPF( D_INFO, "===>> Start suspend\n");\r
2598 isbR32m(MCU, DSTAT);\r
2599\r
2600 DPMC0reg.raw = isbR32m(MCU, DPMC0);\r
2601 DPMC0reg.field.DYNSREN = 0;\r
2602 DPMC0reg.field.powerModeOpCode = 0x05; // Disable Master DLL\r
2603 isbW32m(MCU, DPMC0, DPMC0reg.raw);\r
2604\r
2605 // Should be off for negative test case verification\r
2606 #if 1\r
2607 Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),\r
2608 (uint32_t)SB_COMMAND(SB_SUSPEND_CMND_OPCODE, MCU, 0));\r
2609 #endif\r
2610\r
2611 DPF( D_INFO, "press key\n");\r
2612 mgetc();\r
2613 DPF( D_INFO, "===>> Start resume\n");\r
2614 isbR32m(MCU, DSTAT);\r
2615\r
2616 mrc_params->boot_mode = bmS3;\r
2617 i = 0;\r
2618 }\r
2619\r
2620 } // switch\r
2621\r
2622 } // if (menu_after_mrc)\r
2623#endif //MRC_SV\r
2624\r
2625 if (mrc_params->boot_mode & init[i].boot_path)\r
2626 {\r
2627 uint8_t major = init[i].post_code >> 8 & 0xFF;\r
2628 uint8_t minor = init[i].post_code >> 0 & 0xFF;\r
2629 post_code(major, minor);\r
2630\r
2631 my_tsc = read_tsc();\r
2632 init[i].init_fn(mrc_params);\r
2633 DPF(D_TIME, "Execution time %llX", read_tsc() - my_tsc);\r
2634 }\r
2635 }\r
2636\r
2637 // display the timings\r
2638 print_timings(mrc_params);\r
2639\r
2640 // MRC is complete.\r
2641 post_code(0x01, 0xFF);\r
2642\r
2643 LEAVEFN();\r
2644 return;\r
2645}\r