1/************************************************************************\r
2 *\r
3 * Copyright (c) 2013-2015 Intel Corporation.\r
4 *\r
5* This program and the accompanying materials\r
6* are licensed and made available under the terms and conditions of the BSD License\r
7* which accompanies this distribution. The full text of the license may be found at\r
8* http://opensource.org/licenses/bsd-license.php\r
9*\r
10* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12 *\r
13 * This file contains all of the Cat Mountain Memory Reference Code (MRC).\r
14 *\r
15 * These functions are generic and should work for any Cat Mountain config.\r
16 *\r
17 * MRC requires two data structures to be passed in which are initialised by "PreMemInit()".\r
18 *\r
19 * The basic flow is as follows:\r
20 * 01) Check for supported DDR speed configuration\r
21 * 02) Set up MEMORY_MANAGER buffer as pass-through (POR)\r
22 * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive setting possible\r
23 * 04) Set up the MCU logic\r
24 * 05) Set up the DDR_PHY logic\r
25 * 06) Initialise the DRAMs (JEDEC)\r
26 * 07) Perform the Receive Enable Calibration algorithm\r
27 * 08) Perform the Write Leveling algorithm\r
28 * 09) Perform the Read Training algorithm (includes internal Vref)\r
29 * 10) Perform the Write Training algorithm\r
30 * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings\r
31 *\r
32 * Dunit configuration based on Valleyview MRC.\r
33 *\r
34 ***************************************************************************/\r
35\r
36#include "mrc.h"\r
37#include "memory_options.h"\r
38\r
39#include "meminit.h"\r
40#include "meminit_utils.h"\r
41#include "hte.h"\r
42#include "io.h"\r
43\r
44// Override ODT to off state if requested\r
45#define DRMC_DEFAULT (mrc_params->rd_odt_value==0?BIT12:0)\r
46\r
47\r
48// tRFC values (in picoseconds) per density\r
49const uint32_t tRFC[5] =\r
50{\r
51 90000, // 512Mb\r
52 110000, // 1Gb\r
53 160000, // 2Gb\r
54 300000, // 4Gb\r
55 350000, // 8Gb\r
56 };\r
57\r
58// tCK clock period in picoseconds per speed index (0=800, 1=1066, 2=1333)\r
59const uint32_t tCK[3] =\r
60{\r
61 2500,\r
62 1875,\r
63 1500\r
64};\r
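// Note: MCEIL() (from the MRC headers) appears to perform a ceiling division,\r
// converting picosecond parameters into whole DRAM clocks using tCK[] above.\r
// For example, at the 1066 speed index (tCK = 1875 ps),\r
// tWR = MCEIL(15000, 1875) = 8 clocks (see prog_ddr_timing_control() and\r
// jedec_init() below).\r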
65\r
66#ifdef SIM\r
67// Select static timings specific to simulation environment\r
68#define PLATFORM_ID 0\r
69#else\r
70// Select static timings specific to ClantonPeek platform\r
71#define PLATFORM_ID 1\r
72#endif\r
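// PLATFORM_ID selects the entry used in the static delay tables below\r
// (ddr_wclk[], ddr_wctl[], ddr_wcmd[] and the BACKUP_* tables):\r
// index 0 = simulation timings, index 1 = ClantonPeek board timings.\r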
73\r
74\r
75// Global variables\r
76const uint16_t ddr_wclk[] =\r
77 {193, 158};\r
78\r
79const uint16_t ddr_wctl[] =\r
80 { 1, 217};\r
81\r
82const uint16_t ddr_wcmd[] =\r
83 { 1, 220};\r
84\r
85\r
86#ifdef BACKUP_RCVN\r
87const uint16_t ddr_rcvn[] =\r
88 {129, 498};\r
89#endif // BACKUP_RCVN\r
90\r
91#ifdef BACKUP_WDQS\r
92const uint16_t ddr_wdqs[] =\r
93 { 65, 289};\r
94#endif // BACKUP_WDQS\r
95\r
96#ifdef BACKUP_RDQS\r
97const uint8_t ddr_rdqs[] =\r
98 { 32, 24};\r
99#endif // BACKUP_RDQS\r
100\r
101#ifdef BACKUP_WDQ\r
102const uint16_t ddr_wdq[] =\r
103 { 32, 257};\r
104#endif // BACKUP_WDQ\r
105\r
106\r
107\r
108// Select MEMORY_MANAGER as the source for PRI interface\r
109static void select_memory_manager(\r
110 MRCParams_t *mrc_params)\r
111{\r
112 RegDCO Dco;\r
113\r
114 ENTERFN();\r
115\r
116 Dco.raw = isbR32m(MCU, DCO);\r
117 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
118 isbW32m(MCU, DCO, Dco.raw);\r
119\r
120 LEAVEFN();\r
121}\r
122\r
123// Select HTE as the source for PRI interface\r
124void select_hte(\r
125 MRCParams_t *mrc_params)\r
126{\r
127 RegDCO Dco;\r
128\r
129 ENTERFN();\r
130\r
131 Dco.raw = isbR32m(MCU, DCO);\r
132 Dco.field.PMICTL = 1; //1 - PRI owned by HTE\r
133 isbW32m(MCU, DCO, Dco.raw);\r
134\r
135 LEAVEFN();\r
136}\r
137\r
138// Send a DRAM command; the data should be formatted\r
139// using a DCMD_Xxxx macro or an emrsXCommand structure.\r
140static void dram_init_command(\r
141 uint32_t data)\r
142{\r
143 Wr32(DCMD, 0, data);\r
144}\r
145\r
146// Send DRAM wake command using special MCU side-band WAKE opcode\r
147static void dram_wake_command(\r
148 void)\r
149{\r
150 ENTERFN();\r
151\r
152 Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),\r
153 (uint32_t) SB_COMMAND(SB_WAKE_CMND_OPCODE, MCU, 0));\r
154\r
155 LEAVEFN();\r
156}\r
157\r
158// Stop self refresh driven by MCU\r
159static void clear_self_refresh(\r
160 MRCParams_t *mrc_params)\r
161{\r
162 ENTERFN();\r
163\r
164 // clear the PMSTS Channel Self Refresh bits\r
165 isbM32m(MCU, PMSTS, BIT0, BIT0);\r
166\r
167 LEAVEFN();\r
168}\r
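// Register access convention used throughout this file, as suggested by the\r
// calls above: isbR32m(unit, reg) reads a 32-bit side-band register,\r
// isbW32m(unit, reg, value) writes one, and isbM32m(unit, reg, value, mask)\r
// appears to perform a read-modify-write that updates only the bits set in\r
// 'mask' with the corresponding bits of 'value'.\r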
169\r
170// Configure MCU before jedec init sequence\r
171static void prog_decode_before_jedec(\r
172 MRCParams_t *mrc_params)\r
173{\r
174 RegDRP Drp;\r
175 RegDRCF Drfc;\r
176 RegDCAL Dcal;\r
177 RegDSCH Dsch;\r
178 RegDPMC0 Dpmc0;\r
179\r
180 ENTERFN();\r
181\r
182 // Disable power saving features\r
183 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
184 Dpmc0.field.CLKGTDIS = 1;\r
185 Dpmc0.field.DISPWRDN = 1;\r
186 Dpmc0.field.DYNSREN = 0;\r
187 Dpmc0.field.PCLSTO = 0;\r
188 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
189\r
190 // Disable out of order transactions\r
191 Dsch.raw = isbR32m(MCU, DSCH);\r
192 Dsch.field.OOODIS = 1;\r
193 Dsch.field.NEWBYPDIS = 1;\r
194 isbW32m(MCU, DSCH, Dsch.raw);\r
195\r
196 // Disable issuing the REF command\r
197 Drfc.raw = isbR32m(MCU, DRFC);\r
198 Drfc.field.tREFI = 0;\r
199 isbW32m(MCU, DRFC, Drfc.raw);\r
200\r
201 // Disable ZQ calibration short\r
202 Dcal.raw = isbR32m(MCU, DCAL);\r
203 Dcal.field.ZQCINT = 0;\r
204 Dcal.field.SRXZQCL = 0;\r
205 isbW32m(MCU, DCAL, Dcal.raw);\r
206\r
207 // Training is performed in address mode 0, so rank population has limited impact; however,\r
208 // the simulator complains if a non-existent rank is enabled.\r
209 Drp.raw = 0;\r
210 if (mrc_params->rank_enables & 1)\r
211 Drp.field.rank0Enabled = 1;\r
212 if (mrc_params->rank_enables & 2)\r
213 Drp.field.rank1Enabled = 1;\r
214 isbW32m(MCU, DRP, Drp.raw);\r
215\r
216 LEAVEFN();\r
217}\r
218\r
219// After Cold Reset, BIOS should set COLDWAKE bit to 1 before\r
220// sending the WAKE message to the Dunit.\r
221// For Standby Exit, or any other mode in which the DRAM is in\r
222// SR, this bit must be set to 0.\r
223static void perform_ddr_reset(\r
224 MRCParams_t *mrc_params)\r
225{\r
226 ENTERFN();\r
227\r
228 // Set COLDWAKE bit before sending the WAKE message\r
229 isbM32m(MCU, DRMC, BIT16, BIT16);\r
230\r
231 // Send wake command to DUNIT (MUST be done before JEDEC)\r
232 dram_wake_command();\r
233\r
234 // Set DRMC to its default value (DRMC_DEFAULT keeps the ODT-off override, BIT12, when rd_odt_value == 0)\r
235 isbW32m(MCU, DRMC, DRMC_DEFAULT);\r
236\r
237 LEAVEFN();\r
238}\r
239\r
240// Dunit Initialisation Complete.\r
241// Indicates that initialisation of the Dunit has completed.\r
242// Memory accesses are permitted and maintenance operation\r
243// begins. Until this bit is set to a 1, the memory controller will\r
244// not accept DRAM requests from the MEMORY_MANAGER or HTE.\r
245static void set_ddr_init_complete(\r
246 MRCParams_t *mrc_params)\r
247{\r
248 RegDCO Dco;\r
249\r
250 ENTERFN();\r
251\r
252 Dco.raw = isbR32m(MCU, DCO);\r
253 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
254 Dco.field.IC = 1; //1 - initialisation complete\r
255 isbW32m(MCU, DCO, Dco.raw);\r
256\r
257 LEAVEFN();\r
258}\r
259\r
260static void prog_page_ctrl(\r
261 MRCParams_t *mrc_params)\r
262{\r
263 RegDPMC0 Dpmc0;\r
264\r
265 ENTERFN();\r
266\r
267 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
268\r
269 Dpmc0.field.PCLSTO = 0x4;\r
270 Dpmc0.field.PREAPWDEN = 1;\r
271\r
272 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
273}\r
274\r
275// Configure MCU Power Management Control Register\r
276// and Scheduler Control Register.\r
277static void prog_ddr_control(\r
278 MRCParams_t *mrc_params)\r
279{\r
280 RegDSCH Dsch;\r
281 RegDPMC0 Dpmc0;\r
282\r
283 ENTERFN();\r
284\r
285 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
286 Dsch.raw = isbR32m(MCU, DSCH);\r
287\r
288 Dpmc0.field.DISPWRDN = mrc_params->power_down_disable;\r
289 Dpmc0.field.CLKGTDIS = 0;\r
290 Dpmc0.field.PCLSTO = 4;\r
291 Dpmc0.field.PREAPWDEN = 1;\r
292\r
293 Dsch.field.OOODIS = 0;\r
294 Dsch.field.OOOST3DIS = 0;\r
295 Dsch.field.NEWBYPDIS = 0;\r
296\r
297 isbW32m(MCU, DSCH, Dsch.raw);\r
298 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
299\r
300 // CMDTRIST = 2h - CMD/ADDR are tristated when no valid command\r
301 isbM32m(MCU, DPMC1, 2 << 4, BIT5|BIT4);\r
302\r
303 LEAVEFN();\r
304}\r
305\r
306// After training complete configure MCU Rank Population Register\r
307// specifying: ranks enabled, device width, density, address mode.\r
308static void prog_dra_drb(\r
309 MRCParams_t *mrc_params)\r
310{\r
311 RegDRP Drp;\r
312 RegDCO Dco;\r
313\r
314 ENTERFN();\r
315\r
316 Dco.raw = isbR32m(MCU, DCO);\r
317 Dco.field.IC = 0;\r
318 isbW32m(MCU, DCO, Dco.raw);\r
319\r
320 Drp.raw = 0;\r
321 if (mrc_params->rank_enables & 1)\r
322 Drp.field.rank0Enabled = 1;\r
323 if (mrc_params->rank_enables & 2)\r
324 Drp.field.rank1Enabled = 1;\r
325 if (mrc_params->dram_width == x16)\r
326 {\r
327 Drp.field.dimm0DevWidth = 1;\r
328 Drp.field.dimm1DevWidth = 1;\r
329 }\r
330 // Density encoding in DRAMParams_t (0=512Mb, 1=1Gb, 2=2Gb, 3=4Gb)\r
331 // has to be mapped to the RANKDENSx encoding (0=1Gb)\r
332 Drp.field.dimm0DevDensity = mrc_params->params.DENSITY - 1;\r
333 Drp.field.dimm1DevDensity = mrc_params->params.DENSITY - 1;\r
334\r
335 // Address mode can be overwritten if ECC is enabled\r
336 Drp.field.addressMap = mrc_params->address_mode;\r
337\r
338 isbW32m(MCU, DRP, Drp.raw);\r
339\r
340 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
341 Dco.field.IC = 1; //1 - initialisation complete\r
342 isbW32m(MCU, DCO, Dco.raw);\r
343\r
344 LEAVEFN();\r
345}\r
346\r
347// Configure refresh rate and short ZQ calibration interval.\r
348// Activate dynamic self refresh.\r
349static void change_refresh_period(\r
350 MRCParams_t *mrc_params)\r
351{\r
352 RegDRCF Drfc;\r
353 RegDCAL Dcal;\r
354 RegDPMC0 Dpmc0;\r
355\r
356 ENTERFN();\r
357\r
358 Drfc.raw = isbR32m(MCU, DRFC);\r
359 Drfc.field.tREFI = mrc_params->refresh_rate;\r
360 Drfc.field.REFDBTCLR = 1;\r
361 isbW32m(MCU, DRFC, Drfc.raw);\r
362\r
363 Dcal.raw = isbR32m(MCU, DCAL);\r
364 Dcal.field.ZQCINT = 3; // 63ms\r
365 isbW32m(MCU, DCAL, Dcal.raw);\r
366\r
367 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
368 Dpmc0.field.ENPHYCLKGATE = 1;\r
369 Dpmc0.field.DYNSREN = 1;\r
370 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
371\r
372 LEAVEFN();\r
373}\r
374\r
375// Send DRAM wake command\r
376static void perform_wake(\r
377 MRCParams_t *mrc_params)\r
378{\r
379 ENTERFN();\r
380\r
381 dram_wake_command();\r
382\r
383 LEAVEFN();\r
384}\r
385\r
386// prog_ddr_timing_control (aka mcu_init):\r
387// POST_CODE[major] == 0x02\r
388//\r
389// It will initialise timing registers in the MCU (DTR0..DTR4).\r
390static void prog_ddr_timing_control(\r
391 MRCParams_t *mrc_params)\r
392{\r
393 uint8_t TCL, WL;\r
394 uint8_t TRP, TRCD, TRAS, TWR, TWTR, TRRD, TRTP, TFAW;\r
395 uint32_t TCK;\r
396\r
397 RegDTR0 Dtr0;\r
398 RegDTR1 Dtr1;\r
399 RegDTR2 Dtr2;\r
400 RegDTR3 Dtr3;\r
401 RegDTR4 Dtr4;\r
402\r
403 ENTERFN();\r
404\r
405 // mcu_init starts\r
406 post_code(0x02, 0x00);\r
407\r
408 Dtr0.raw = isbR32m(MCU, DTR0);\r
409 Dtr1.raw = isbR32m(MCU, DTR1);\r
410 Dtr2.raw = isbR32m(MCU, DTR2);\r
411 Dtr3.raw = isbR32m(MCU, DTR3);\r
412 Dtr4.raw = isbR32m(MCU, DTR4);\r
413\r
414 TCK = tCK[mrc_params->ddr_speed]; // Clock in picoseconds\r
415 TCL = mrc_params->params.tCL; // CAS latency in clocks\r
416 TRP = TCL; // Per CAT MRC\r
417 TRCD = TCL; // Per CAT MRC\r
418 TRAS = MCEIL(mrc_params->params.tRAS, TCK);\r
419 TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600\r
420\r
421 TWTR = MCEIL(mrc_params->params.tWTR, TCK);\r
422 TRRD = MCEIL(mrc_params->params.tRRD, TCK);\r
423 TRTP = 4; // Valid for 800 and 1066, use 5 for 1333\r
424 TFAW = MCEIL(mrc_params->params.tFAW, TCK);\r
425\r
426 WL = 5 + mrc_params->ddr_speed;\r
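  // With ddr_speed indexing 800/1066/1333, this gives write latencies of\r
  // 5/6/7 clocks, matching the JEDEC DDR3 CAS write latency (CWL) for those\r
  // data rates.\r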
427\r
428 Dtr0.field.dramFrequency = mrc_params->ddr_speed;\r
429\r
430 Dtr0.field.tCL = TCL - 5; //Convert from TCL (DRAM clocks) to VLV index\r
431 Dtr0.field.tRP = TRP - 5; //5 bit DRAM Clock\r
432 Dtr0.field.tRCD = TRCD - 5; //5 bit DRAM Clock\r
433\r
434 Dtr1.field.tWCL = WL - 3; //Convert from WL (DRAM clocks) to VLV index\r
435 Dtr1.field.tWTP = WL + 4 + TWR - 14; //Change to tWTP\r
436 Dtr1.field.tRTP = MMAX(TRTP, 4) - 3; //4 bit DRAM Clock\r
437 Dtr1.field.tRRD = TRRD - 4; //4 bit DRAM Clock\r
438 Dtr1.field.tCMD = 1; //2N\r
439 Dtr1.field.tRAS = TRAS - 14; //6 bit DRAM Clock\r
440\r
441 Dtr1.field.tFAW = ((TFAW + 1) >> 1) - 5; //4 bit DRAM Clock\r
442 Dtr1.field.tCCD = 0; //Set 4 Clock CAS to CAS delay (multi-burst)\r
443 Dtr2.field.tRRDR = 1;\r
444 Dtr2.field.tWWDR = 2;\r
445 Dtr2.field.tRWDR = 2;\r
446 Dtr3.field.tWRDR = 2;\r
447 Dtr3.field.tWRDD = 2;\r
448\r
449 if (mrc_params->ddr_speed == DDRFREQ_800)\r
450 {\r
451 // Extended RW delay (+1)\r
452 Dtr3.field.tRWSR = TCL - 5 + 1;\r
453 }\r
454 else if(mrc_params->ddr_speed == DDRFREQ_1066)\r
455 {\r
456 // Extended RW delay (+1)\r
457 Dtr3.field.tRWSR = TCL - 5 + 1;\r
458 }\r
459\r
460 Dtr3.field.tWRSR = 4 + WL + TWTR - 11;\r
461\r
462 if (mrc_params->ddr_speed == DDRFREQ_800)\r
463 {\r
464 Dtr3.field.tXP = MMAX(0, 1 - Dtr1.field.tCMD);\r
465 }\r
466 else\r
467 {\r
468 Dtr3.field.tXP = MMAX(0, 2 - Dtr1.field.tCMD);\r
469 }\r
470\r
471 Dtr4.field.WRODTSTRT = Dtr1.field.tCMD;\r
472 Dtr4.field.WRODTSTOP = Dtr1.field.tCMD;\r
473 Dtr4.field.RDODTSTRT = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2; //Convert from WL (DRAM clocks) to VLV index\r
474 Dtr4.field.RDODTSTOP = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2;\r
475 Dtr4.field.TRGSTRDIS = 0;\r
476 Dtr4.field.ODTDIS = 0;\r
477\r
478 isbW32m(MCU, DTR0, Dtr0.raw);\r
479 isbW32m(MCU, DTR1, Dtr1.raw);\r
480 isbW32m(MCU, DTR2, Dtr2.raw);\r
481 isbW32m(MCU, DTR3, Dtr3.raw);\r
482 isbW32m(MCU, DTR4, Dtr4.raw);\r
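  // Illustrative arithmetic for the encodings above (assuming a DDR3-1066\r
  // part with tCL = 7; these values are not taken from this file):\r
  // TCK = 1875 ps, Dtr0.tCL = 7 - 5 = 2, WL = 6 so Dtr1.tWCL = 3,\r
  // TWR = MCEIL(15000, 1875) = 8, and Dtr1.tWTP = WL + 4 + TWR - 14 = 4.\r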
483\r
484 LEAVEFN();\r
485}\r
486\r
487// ddrphy_init:\r
488// POST_CODE[major] == 0x03\r
489//\r
490// This function performs some initialisation on the DDRIO unit.\r
491// This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.\r
492static void ddrphy_init(MRCParams_t *mrc_params)\r
493{\r
494 uint32_t tempD; // temporary DWORD\r
495 uint8_t channel_i; // channel counter\r
496 uint8_t rank_i; // rank counter\r
497 uint8_t bl_grp_i; // byte lane group counter (2 BLs per module)\r
498\r
499 uint8_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1; // byte lane divisor\r
500 uint8_t speed = mrc_params->ddr_speed & (BIT1|BIT0); // For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333\r
501 uint8_t tCAS;\r
502 uint8_t tCWL;\r
503\r
504 ENTERFN();\r
505\r
506 tCAS = mrc_params->params.tCL;\r
507 tCWL = 5 + mrc_params->ddr_speed;\r
508\r
509 // ddrphy_init starts\r
510 post_code(0x03, 0x00);\r
511\r
512 // HSD#231531\r
513 // Make sure IOBUFACT is deasserted before initialising the DDR PHY.\r
514 // HSD#234845\r
515 // Make sure WRPTRENABLE is deasserted before initialising the DDR PHY.\r
516 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
517 if (mrc_params->channel_enables & (1<<channel_i)) {\r
518 // Deassert DDRPHY Initialisation Complete\r
519 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT20, BIT20); // SPID_INIT_COMPLETE=0\r
520 // Deassert IOBUFACT\r
521 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT2, BIT2); // IOBUFACTRST_N=0\r
522 // Disable WRPTR\r
523 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT0, BIT0); // WRPTRENABLE=0\r
524 } // if channel enabled\r
525 } // channel_i loop\r
526\r
527 // Put PHY in reset\r
528 isbM32m(DDRPHY, MASTERRSTN, 0, BIT0); // PHYRSTN=0\r
529\r
530 // Initialise DQ01,DQ23,CMD,CLK-CTL,COMP modules\r
531 // STEP0:\r
532 post_code(0x03, 0x10);\r
533 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
534 if (mrc_params->channel_enables & (1<<channel_i)) {\r
535\r
536 // DQ01-DQ23\r
537 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
538 isbM32m(DDRPHY, (DQOBSCKEBBCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i) ? (0x00) : (BIT22)), (BIT22)); // Analog MUX select - IO2xCLKSEL\r
539\r
540 // ODT Strength\r
541 switch (mrc_params->rd_odt_value) {\r
542 case 1: tempD = 0x3; break; // 60 ohm\r
543 case 2: tempD = 0x3; break; // 120 ohm\r
544 case 3: tempD = 0x3; break; // 180 ohm\r
545 default: tempD = 0x3; break; // 120 ohm\r
546 }\r
547 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
548 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
549 // Dynamic ODT/DIFFAMP\r
550 tempD = (((tCAS)<<24)|((tCAS)<<16)|((tCAS)<<8)|((tCAS)<<0));\r
551 switch (speed) {\r
552 case 0: tempD -= 0x01010101; break; // 800\r
553 case 1: tempD -= 0x02020202; break; // 1066\r
554 case 2: tempD -= 0x03030303; break; // 1333\r
555 case 3: tempD -= 0x04040404; break; // 1600\r
556 }\r
557 isbM32m(DDRPHY, (B01LATCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // Launch Time: ODT, DIFFAMP, ODT, DIFFAMP\r
558 switch (speed) {\r
559 // HSD#234715\r
560 case 0: tempD = ((0x06<<16)|(0x07<<8)); break; // 800\r
561 case 1: tempD = ((0x07<<16)|(0x08<<8)); break; // 1066\r
562 case 2: tempD = ((0x09<<16)|(0x0A<<8)); break; // 1333\r
563 case 3: tempD = ((0x0A<<16)|(0x0B<<8)); break; // 1600\r
564 }\r
565 isbM32m(DDRPHY, (B0ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
566 isbM32m(DDRPHY, (B1ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
567\r
568 switch (mrc_params->rd_odt_value) {\r
569 case 0: tempD = ((0x3F<<16)|(0x3f<<10)); break; // override DIFFAMP=on, ODT=off\r
570 default: tempD = ((0x3F<<16)|(0x2A<<10)); break; // override DIFFAMP=on, ODT=on\r
571 }\r
572 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
573 isbM32m(DDRPHY, (B1OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
574\r
575 // DLL Setup\r
576 // 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO)\r
577 isbM32m(DDRPHY, (B0LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
578 isbM32m(DDRPHY, (B1LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
579\r
580 // RCVEN Bypass (PO)\r
581 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
582 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
583 // TX\r
584 isbM32m(DDRPHY, (DQCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT16), (BIT16)); // 0 means driving DQ during DQS-preamble\r
585 isbM32m(DDRPHY, (B01PTRCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT8), (BIT8)); // WR_LVL mode disable\r
586 // RX (PO)\r
587 isbM32m(DDRPHY, (B0VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
588 isbM32m(DDRPHY, (B1VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
589 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
590 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
591 }\r
592 // CLKEBB\r
593 isbM32m(DDRPHY, (CMDOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT23));\r
594\r
595 // Enable tristate control of cmd/address bus\r
596 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT1|BIT0));\r
597\r
598 // ODT RCOMP\r
599 isbM32m(DDRPHY, (CMDRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<5)|(0x03<<0)), ((BIT9|BIT8|BIT7|BIT6|BIT5)|(BIT4|BIT3|BIT2|BIT1|BIT0)));\r
600\r
601 // CMDPM* registers must be programmed in this order...\r
602 isbM32m(DDRPHY, (CMDPMDLYREG4 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFFFU<<16)|(0xFFFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: SFR (regulator), MPLL\r
603 isbM32m(DDRPHY, (CMDPMDLYREG3 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFU<<28)|(0xFFF<<16)|(0xF<<12)|(0x616<<0)), ((BIT31|BIT30|BIT29|BIT28)|(BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3, VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT_for_PM_MSG_gt0, MDLL Turn On\r
604 isbM32m(DDRPHY, (CMDPMDLYREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // MPLL Divider Reset Delays\r
605 isbM32m(DDRPHY, (CMDPMDLYREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn Off Delays: VREG, Staggered MDLL, MDLL, PI\r
606 isbM32m(DDRPHY, (CMDPMDLYREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT\r
607 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x6<<8)|BIT6|(0x4<<0)), (BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|(BIT11|BIT10|BIT9|BIT8)|BIT6|(BIT3|BIT2|BIT1|BIT0))); // Allow PUnit signals\r
608 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
609 // CLK-CTL\r
610 isbM32m(DDRPHY, (CCOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT24)); // CLKEBB\r
611 isbM32m(DDRPHY, (CCCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x0<<16)|(0x0<<12)|(0x0<<8)|(0xF<<4)|BIT0), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|BIT0)); // Buffer Enable: CS,CKE,ODT,CLK\r
612 isbM32m(DDRPHY, (CCRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT RCOMP\r
613 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
614\r
615 // COMP (RON channel specific)\r
616 // - DQ/DQS/DM RON: 32 Ohm\r
617 // - CTRL/CMD RON: 27 Ohm\r
618 // - CLK RON: 26 Ohm\r
619 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
620 isbM32m(DDRPHY, (CMDVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
621 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0F<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
622 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
623 isbM32m(DDRPHY, (CTLVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
624\r
625 // DQS Swapped Input Enable\r
626 isbM32m(DDRPHY, (COMPEN1CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT19|BIT17), ((BIT31|BIT30)|BIT19|BIT17|(BIT15|BIT14)));\r
627\r
628 // ODT VREF = 1.5 x 274/(360+274) = 0.65V (code of ~50)\r
629 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
630 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
631 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0E<<8)|(0x05<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
632\r
633 // Slew rate settings are frequency specific; the numbers below are for 800MHz (speed == 0)\r
634 // - DQ/DQS/DM/CLK SR: 4V/ns,\r
635 // - CTRL/CMD SR: 1.5V/ns\r
636 tempD = (0x0E<<16)|(0x0E<<12)|(0x08<<8)|(0x0B<<4)|(0x0B<<0);\r
637 isbM32m(DDRPHY, (DLYSELCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (tempD), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ\r
638 isbM32m(DDRPHY, (TCOVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x05<<16)|(0x05<<8)|(0x05<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // TCO Vref CLK,DQS,DQ\r
639 isbM32m(DDRPHY, (CCBUFODTCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODTCOMP CMD/CTL PU/PD\r
640 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (0), ((BIT31|BIT30)|BIT8)); // COMP\r
641\r
642 #ifdef BACKUP_COMPS\r
643 // DQ COMP Overrides\r
644 isbM32m(DDRPHY, (DQDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
645 isbM32m(DDRPHY, (DQDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
646 isbM32m(DDRPHY, (DQDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
647 isbM32m(DDRPHY, (DQDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
648 isbM32m(DDRPHY, (DQODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
649 isbM32m(DDRPHY, (DQODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
650 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
651 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
652 // DQS COMP Overrides\r
653 isbM32m(DDRPHY, (DQSDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
654 isbM32m(DDRPHY, (DQSDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
655 isbM32m(DDRPHY, (DQSDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
656 isbM32m(DDRPHY, (DQSDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
657 isbM32m(DDRPHY, (DQSODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
658 isbM32m(DDRPHY, (DQSODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
659 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
660 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
661 // CLK COMP Overrides\r
662 isbM32m(DDRPHY, (CLKDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
663 isbM32m(DDRPHY, (CLKDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
664 isbM32m(DDRPHY, (CLKDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
665 isbM32m(DDRPHY, (CLKDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
666 isbM32m(DDRPHY, (CLKODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
667 isbM32m(DDRPHY, (CLKODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
668 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
669 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
670 // CMD COMP Overrides\r
671 isbM32m(DDRPHY, (CMDDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
672 isbM32m(DDRPHY, (CMDDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
673 isbM32m(DDRPHY, (CMDDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
674 isbM32m(DDRPHY, (CMDDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
675 // CTL COMP Overrides\r
676 isbM32m(DDRPHY, (CTLDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
677 isbM32m(DDRPHY, (CTLDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
678 isbM32m(DDRPHY, (CTLDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
679 isbM32m(DDRPHY, (CTLDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
680 #else\r
681 // DQ TCOCOMP Overrides\r
682 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
683 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
684 // DQS TCOCOMP Overrides\r
685 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
686 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
687 // CLK TCOCOMP Overrides\r
688 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
689 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
690 #endif // BACKUP_COMPS\r
691 // program STATIC delays\r
692 #ifdef BACKUP_WCMD\r
693 set_wcmd(channel_i, ddr_wcmd[PLATFORM_ID]);\r
694 #else\r
695 set_wcmd(channel_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
696 #endif // BACKUP_WCMD\r
697 for (rank_i=0; rank_i<NUM_RANKS; rank_i++) {\r
698 if (mrc_params->rank_enables & (1<<rank_i)) {\r
699 set_wclk(channel_i, rank_i, ddr_wclk[PLATFORM_ID]);\r
700 #ifdef BACKUP_WCTL\r
701 set_wctl(channel_i, rank_i, ddr_wctl[PLATFORM_ID]);\r
702 #else\r
703 set_wctl(channel_i, rank_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
704 #endif // BACKUP_WCTL\r
705 }\r
706 }\r
707 }\r
708 }\r
709 // COMP (non channel specific)\r
710 //isbM32m(DDRPHY, (), (), ());\r
711 isbM32m(DDRPHY, (DQANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
712 isbM32m(DDRPHY, (DQANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
713 isbM32m(DDRPHY, (CMDANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
714 isbM32m(DDRPHY, (CMDANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
715 isbM32m(DDRPHY, (CLKANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
716 isbM32m(DDRPHY, (CLKANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
717 isbM32m(DDRPHY, (DQSANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
718 isbM32m(DDRPHY, (DQSANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
719 isbM32m(DDRPHY, (CTLANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
720 isbM32m(DDRPHY, (CTLANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
721 isbM32m(DDRPHY, (DQANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
722 isbM32m(DDRPHY, (DQANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
723 isbM32m(DDRPHY, (CLKANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
724 isbM32m(DDRPHY, (CLKANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
725 isbM32m(DDRPHY, (DQSANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
726 isbM32m(DDRPHY, (DQSANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
727 isbM32m(DDRPHY, (DQANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
728 isbM32m(DDRPHY, (DQANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
729 isbM32m(DDRPHY, (CMDANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
730 isbM32m(DDRPHY, (CMDANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
731 isbM32m(DDRPHY, (CLKANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
732 isbM32m(DDRPHY, (CLKANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
733 isbM32m(DDRPHY, (DQSANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
734 isbM32m(DDRPHY, (DQSANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
735 isbM32m(DDRPHY, (CTLANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
736 isbM32m(DDRPHY, (CTLANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
737 isbM32m(DDRPHY, (DQANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
738 isbM32m(DDRPHY, (DQANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
739 isbM32m(DDRPHY, (CLKANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
740 isbM32m(DDRPHY, (CLKANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
741 isbM32m(DDRPHY, (DQSANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
742 isbM32m(DDRPHY, (DQSANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
743 isbM32m(DDRPHY, (TCOCNTCTRL), (0x1<<0), (BIT1|BIT0)); // TCOCOMP: Pulse Count\r
744 isbM32m(DDRPHY, (CHNLBUFSTATIC), ((0x03<<24)|(0x03<<16)), ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODT: CMD/CTL PD/PU\r
745 isbM32m(DDRPHY, (MSCNTR), (0x64<<0), (BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0)); // Set 1us counter\r
746 isbM32m(DDRPHY, (LATCH1CTL), (0x1<<28), (BIT30|BIT29|BIT28)); // ???\r
747\r
748 // Release PHY from reset\r
749 isbM32m(DDRPHY, MASTERRSTN, BIT0, BIT0); // PHYRSTN=1\r
750\r
751 // STEP1:\r
752 post_code(0x03, 0x11);\r
753 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
754 if (mrc_params->channel_enables & (1<<channel_i)) {\r
755 // DQ01-DQ23\r
756 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
757 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
758 delay_n(3);\r
759 }\r
760 // ECC\r
761 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT13), (BIT13)); // Enable VREG\r
762 delay_n(3);\r
763 // CMD\r
764 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
765 delay_n(3);\r
766 // CLK-CTL\r
767 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
768 delay_n(3);\r
769 }\r
770 }\r
771\r
772 // STEP2:\r
773 post_code(0x03, 0x12);\r
774 delay_n(200);\r
775 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
776 if (mrc_params->channel_enables & (1<<channel_i)) {\r
777 // DQ01-DQ23\r
778 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
779 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT17), (BIT17)); // Enable MCDLL\r
780 delay_n(50);\r
781 }\r
782 // ECC\r
783 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT17), (BIT17)); // Enable MCDLL\r
784 delay_n(50);\r
785 // CMD\r
786 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
787 delay_n(50);\r
788 // CLK-CTL\r
789 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
790 delay_n(50);\r
791 }\r
792 }\r
793\r
794 // STEP3:\r
795 post_code(0x03, 0x13);\r
796 delay_n(100);\r
797 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
798 if (mrc_params->channel_enables & (1<<channel_i)) {\r
799 // DQ01-DQ23\r
800 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
801#ifdef FORCE_16BIT_DDRIO\r
802 tempD = ((bl_grp_i) && (mrc_params->channel_width == x16)) ? ((0x1<<12)|(0x1<<8)|(0xF<<4)|(0xF<<0)) : ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
803#else\r
804 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
805#endif\r
806 isbM32m(DDRPHY, (DQDLLTXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
807 delay_n(3);\r
808 isbM32m(DDRPHY, (DQDLLRXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL\r
809 delay_n(3);\r
810 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL Overrides BL0\r
811 }\r
812\r
813 // ECC\r
814 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
815 isbM32m(DDRPHY, (ECCDLLTXCTL), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
816 delay_n(3);\r
817\r
818 // CMD (PO)\r
819 isbM32m(DDRPHY, (CMDDLLTXCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0)), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
820 delay_n(3);\r
821 }\r
822 }\r
823\r
824\r
825 // STEP4:\r
826 post_code(0x03, 0x14);\r
827 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
828 if (mrc_params->channel_enables & (1<<channel_i)) {\r
829 // Host To Memory Clock Alignment (HMC) for 800/1066\r
830 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
831 isbM32m(DDRPHY, (DQCLKALIGNREG2 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i)?(0x3):(0x1)), (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
832 }\r
833 isbM32m(DDRPHY, (ECCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
834 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x0, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
835 isbM32m(DDRPHY, (CCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
836 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), (0x2<<4), (BIT5|BIT4)); // CLK_ALIGN_MODE\r
837 isbM32m(DDRPHY, (CMDCLKALIGNREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x18<<16)|(0x10<<8)|(0x8<<2)|(0x1<<0)), ((BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|(BIT1|BIT0))); // NUM_SAMPLES, MAX_SAMPLES, MACRO_PI_STEP, MICRO_PI_STEP\r
838 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x10<<16)|(0x4<<8)|(0x2<<4)), ((BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4))); // ???, TOTAL_NUM_MODULES, FIRST_U_PARTITION\r
839 #ifdef HMC_TEST\r
840 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT24, BIT24); // START_CLK_ALIGN=1\r
841 while (isbR32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET))) & BIT24); // wait for START_CLK_ALIGN=0\r
842 #endif // HMC_TEST\r
843\r
844 // Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN\r
845 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), BIT0, BIT0); // WRPTRENABLE=1\r
846\r
847\r
848#ifdef SIM\r
849 // comp is not working on simulator\r
850#else\r
851 // COMP initial\r
852 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), BIT5, BIT5); // enable bypass for CLK buffer (PO)\r
853 isbM32m(DDRPHY, (CMPCTRL), (BIT0), (BIT0)); // Initial COMP Enable\r
854 while (isbR32m(DDRPHY, (CMPCTRL)) & BIT0); // wait for Initial COMP Enable = 0\r
855 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), ~BIT5, BIT5); // disable bypass for CLK buffer (PO)\r
856#endif\r
857\r
858 // IOBUFACT\r
859 // STEP4a\r
860 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT2, BIT2); // IOBUFACTRST_N=1\r
861\r
862 // DDRPHY initialisation complete\r
863 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT20, BIT20); // SPID_INIT_COMPLETE=1\r
864 }\r
865 }\r
866\r
867 LEAVEFN();\r
868 return;\r
869}\r
870\r
871// jedec_init (aka PerformJedecInit):\r
872// This function performs JEDEC initialisation on all enabled channels.\r
873static void jedec_init(\r
874 MRCParams_t *mrc_params,\r
875 uint32_t silent)\r
876{\r
877 uint8_t TWR, WL, Rank;\r
878 uint32_t TCK;\r
879\r
880 RegDTR0 DTR0reg;\r
881\r
882 DramInitDDR3MRS0 mrs0Command;\r
883 DramInitDDR3EMR1 emrs1Command;\r
884 DramInitDDR3EMR2 emrs2Command;\r
885 DramInitDDR3EMR3 emrs3Command;\r
886\r
887 ENTERFN();\r
888\r
889 // jedec_init starts\r
890 if (!silent)\r
891 {\r
892 post_code(0x04, 0x00);\r
893 }\r
894\r
895 // Assert RESET# for 200us\r
896 isbM32m(DDRPHY, CCDDR3RESETCTL, BIT1, (BIT8|BIT1)); // DDR3_RESET_SET=0, DDR3_RESET_RESET=1\r
897#ifdef QUICKSIM\r
898 // Don't waste time during simulation\r
899 delay_u(2);\r
900#else\r
901 delay_u(200);\r
902#endif\r
903 isbM32m(DDRPHY, CCDDR3RESETCTL, BIT8, (BIT8|BIT1)); // DDR3_RESET_SET=1, DDR3_RESET_RESET=0\r
904\r
905 DTR0reg.raw = isbR32m(MCU, DTR0);\r
906\r
907 // Set CKEVAL for populated ranks\r
908 // then send NOP to each rank (#4550197)\r
909 {\r
910 uint32_t DRPbuffer;\r
911 uint32_t DRMCbuffer;\r
912\r
913 DRPbuffer = isbR32m(MCU, DRP);\r
914 DRPbuffer &= 0x3;\r
915 DRMCbuffer = isbR32m(MCU, DRMC);\r
916 DRMCbuffer &= 0xFFFFFFFC;\r
917 DRMCbuffer |= (BIT4 | DRPbuffer);\r
918\r
919 isbW32m(MCU, DRMC, DRMCbuffer);\r
920\r
921 for (Rank = 0; Rank < NUM_RANKS; Rank++)\r
922 {\r
923 // Skip to next populated rank\r
924 if ((mrc_params->rank_enables & (1 << Rank)) == 0)\r
925 {\r
926 continue;\r
927 }\r
928\r
929 dram_init_command(DCMD_NOP(Rank));\r
930 }\r
931\r
932 isbW32m(MCU, DRMC, DRMC_DEFAULT);\r
933 }\r
934\r
935 // setup for emrs 2\r
936 // BIT[15:11] --> Always "0"\r
937 // BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)\r
938 // BIT[08] --> Always "0"\r
939 // BIT[07] --> SRT: use sr_temp_range\r
940 // BIT[06] --> ASR: want "Manual SR Reference" (0)\r
941 // BIT[05:03] --> CWL: use oem_tCWL\r
942 // BIT[02:00] --> PASR: want "Full Array" (0)\r
943 emrs2Command.raw = 0;\r
944 emrs2Command.field.bankAddress = 2;\r
945\r
946 WL = 5 + mrc_params->ddr_speed;\r
947 emrs2Command.field.CWL = WL - 5;\r
948 emrs2Command.field.SRT = mrc_params->sr_temp_range;\r
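  // The DDR3 MR2 CWL field encodes (CWL - 5), so the value programmed here\r
  // works out to the ddr_speed index (0 for CWL=5 at 800, 1 for CWL=6 at 1066,\r
  // 2 for CWL=7 at 1333).\r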
949\r
950 // setup for emrs 3\r
951 // BIT[15:03] --> Always "0"\r
952 // BIT[02] --> MPR: want "Normal Operation" (0)\r
953 // BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)\r
954 emrs3Command.raw = 0;\r
955 emrs3Command.field.bankAddress = 3;\r
956\r
957 // setup for emrs 1\r
958 // BIT[15:13] --> Always "0"\r
959 // BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)\r
960 // BIT[11:11] --> TDQS: want "Disabled" (0)\r
961 // BIT[10:10] --> Always "0"\r
962 // BIT[09,06,02] --> Rtt_nom: use rtt_nom_value\r
963 // BIT[08] --> Always "0"\r
964 // BIT[07] --> WR_LVL: want "Disabled" (0)\r
965 // BIT[05,01] --> DIC: use ron_value\r
966 // BIT[04:03] --> AL: additive latency want "0" (0)\r
967 // BIT[00] --> DLL: want "Enable" (0)\r
968 //\r
969 // (BIT5|BIT1) set Ron value\r
970 // 00 --> RZQ/6 (40ohm)\r
971 // 01 --> RZQ/7 (34ohm)\r
972 // 1* --> RESERVED\r
973 //\r
974 // (BIT9|BIT6|BIT2) set Rtt_nom value\r
975 // 000 --> Disabled\r
976 // 001 --> RZQ/4 ( 60ohm)\r
977 // 010 --> RZQ/2 (120ohm)\r
978 // 011 --> RZQ/6 ( 40ohm)\r
979 // 1** --> RESERVED\r
980 emrs1Command.raw = 0;\r
981 emrs1Command.field.bankAddress = 1;\r
982 emrs1Command.field.dllEnabled = 0; // 0 = Enable , 1 = Disable\r
983\r
984 if (mrc_params->ron_value == 0)\r
985 {\r
986 emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_34;\r
987 }\r
988 else\r
989 {\r
990 emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_40;\r
991 }\r
992\r
993\r
994 if (mrc_params->rtt_nom_value == 0)\r
995 {\r
996 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_40 << 6);\r
997 }\r
998 else if (mrc_params->rtt_nom_value == 1)\r
999 {\r
1000 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_60 << 6);\r
1001 }\r
1002 else if (mrc_params->rtt_nom_value == 2)\r
1003 {\r
1004 emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_120 << 6);\r
1005 }\r
1006\r
1007 // save MRS1 value (excluding control fields)\r
1008 mrc_params->mrs1 = emrs1Command.raw >> 6;\r
1009\r
1010 // setup for mrs 0\r
1011 // BIT[15:13] --> Always "0"\r
1012 // BIT[12] --> PPD: for Quark (1)\r
1013 // BIT[11:09] --> WR: use oem_tWR\r
1014 // BIT[08] --> DLL: want "Reset" (1, self clearing)\r
1015 // BIT[07] --> MODE: want "Normal" (0)\r
1016 // BIT[06:04,02] --> CL: use oem_tCAS\r
1017 // BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)\r
1018 // BIT[01:00] --> BL: want "8 Fixed" (0)\r
1019 // WR:\r
1020 // 0 --> 16\r
1021 // 1 --> 5\r
1022 // 2 --> 6\r
1023 // 3 --> 7\r
1024 // 4 --> 8\r
1025 // 5 --> 10\r
1026 // 6 --> 12\r
1027 // 7 --> 14\r
1028 // CL:\r
1029 // BIT[02:02] "0" if oem_tCAS <= 11 (1866?)\r
1030 // BIT[06:04] use oem_tCAS-4\r
1031 mrs0Command.raw = 0;\r
1032 mrs0Command.field.bankAddress = 0;\r
1033 mrs0Command.field.dllReset = 1;\r
1034 mrs0Command.field.BL = 0;\r
1035 mrs0Command.field.PPD = 1;\r
1036 mrs0Command.field.casLatency = DTR0reg.field.tCL + 1;\r
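  // DTR0.tCL was programmed as (oem_tCAS - 5) in prog_ddr_timing_control(),\r
  // so adding 1 here yields (oem_tCAS - 4), matching the\r
  // "CL: BIT[06:04] use oem_tCAS-4" note above.\r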
1037\r
1038 TCK = tCK[mrc_params->ddr_speed];\r
1039 TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600\r
1040 mrs0Command.field.writeRecovery = TWR - 4;\r
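  // For example, at the 800 speed index TCK = 2500 ps, so\r
  // TWR = MCEIL(15000, 2500) = 6 and writeRecovery = 2, which the WR table\r
  // above decodes back to 6 clocks; at 1066 (TCK = 1875 ps), TWR = 8 and\r
  // writeRecovery = 4 decodes to 8 clocks.\r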
1041\r
1042 for (Rank = 0; Rank < NUM_RANKS; Rank++)\r
1043 {\r
1044 // Skip to next populated rank\r
1045 if ((mrc_params->rank_enables & (1 << Rank)) == 0)\r
1046 {\r
1047 continue;\r
1048 }\r
1049\r
1050 emrs2Command.field.rankSelect = Rank;\r
1051 dram_init_command(emrs2Command.raw);\r
1052\r
1053 emrs3Command.field.rankSelect = Rank;\r
1054 dram_init_command(emrs3Command.raw);\r
1055\r
1056 emrs1Command.field.rankSelect = Rank;\r
1057 dram_init_command(emrs1Command.raw);\r
1058\r
1059 mrs0Command.field.rankSelect = Rank;\r
1060 dram_init_command(mrs0Command.raw);\r
1061\r
1062 dram_init_command(DCMD_ZQCL(Rank));\r
1063 }\r
1064\r
1065 LEAVEFN();\r
1066 return;\r
1067}\r
1068\r
1069// rcvn_cal:\r
1070// POST_CODE[major] == 0x05\r
1071//\r
1072// This function will perform our RCVEN Calibration Algorithm.\r
1073// We will only use the 2xCLK domain timings to perform RCVEN Calibration.\r
1074// All byte lanes will be calibrated "simultaneously" per channel per rank.\r
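// Judging from the "128 PI (1 CLK)" and "32 PI (1/4 CLK)" steps used below,\r
// delays are expressed in phase-interpolator (PI) ticks, with FULL_CLK = 128 PI\r
// and QRTR_CLK = 32 PI.\r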
1075static void rcvn_cal(\r
1076 MRCParams_t *mrc_params)\r
1077{\r
1078 uint8_t channel_i; // channel counter\r
1079 uint8_t rank_i; // rank counter\r
1080 uint8_t bl_i; // byte lane counter\r
1081 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1082\r
1083#ifdef R2R_SHARING\r
1084 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1085#ifndef BACKUP_RCVN\r
1086 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1087#endif // BACKUP_RCVN\r
1088#endif // R2R_SHARING\r
1089\r
1090#ifdef BACKUP_RCVN\r
1091#else\r
1092 uint32_t tempD; // temporary DWORD\r
1093 uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane\r
1094 RegDTR1 dtr1;\r
1095 RegDTR1 dtr1save;\r
1096#endif // BACKUP_RCVN\r
1097 ENTERFN();\r
1098\r
1099 // rcvn_cal starts\r
1100 post_code(0x05, 0x00);\r
1101\r
1102#ifndef BACKUP_RCVN\r
1103 // need separate burst to sample DQS preamble\r
1104 dtr1.raw = dtr1save.raw = isbR32m(MCU, DTR1);\r
1105 dtr1.field.tCCD = 1;\r
1106 isbW32m(MCU, DTR1, dtr1.raw);\r
1107#endif\r
1108\r
1109#ifdef R2R_SHARING\r
1110 // need to set "final_delay[][]" elements to "0"\r
1111 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1112#endif // R2R_SHARING\r
1113\r
1114 // loop through each enabled channel\r
1115 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1116 {\r
1117 if (mrc_params->channel_enables & (1 << channel_i))\r
1118 {\r
1119 // perform RCVEN Calibration on a per rank basis\r
1120 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1121 {\r
1122 if (mrc_params->rank_enables & (1 << rank_i))\r
1123 {\r
1124 // POST_CODE here indicates the current channel and rank being calibrated\r
1125 post_code(0x05, (0x10 + ((channel_i << 4) | rank_i)));\r
1126\r
1127#ifdef BACKUP_RCVN\r
1128 // set hard-coded timing values\r
1129 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1130 {\r
1131 set_rcvn(channel_i, rank_i, bl_i, ddr_rcvn[PLATFORM_ID]);\r
1132 }\r
1133#else\r
1134 // enable FIFORST\r
1135 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)\r
1136 {\r
1137 isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), 0,\r
1138 BIT8); // 0 is enabled\r
1139 } // bl_i loop\r
1140 // initialise the starting delay to 128 PI (tCAS +1 CLK)\r
1141 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1142 {\r
1143#ifdef SIM\r
1144 // Original value was late at the end of DQS sequence\r
1145 delay[bl_i] = 3 * FULL_CLK;\r
1146#else\r
1147 delay[bl_i] = (4 + 1) * FULL_CLK; // 1x CLK domain timing is tCAS-4\r
1148#endif\r
1149\r
1150 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1151 } // bl_i loop\r
1152\r
1153 // now find the rising edge\r
1154 find_rising_edge(mrc_params, delay, channel_i, rank_i, true);\r
1155 // Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse.\r
1156 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1157 {\r
1158 delay[bl_i] += QRTR_CLK;\r
1159 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1160 } // bl_i loop\r
1161 // Now decrement delay by 128 PI (1 CLK) until we sample a "0"\r
1162 do\r
1163 {\r
1164\r
1165 tempD = sample_dqs(mrc_params, channel_i, rank_i, true);\r
1166 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1167 {\r
1168 if (tempD & (1 << bl_i))\r
1169 {\r
1170 if (delay[bl_i] >= FULL_CLK)\r
1171 {\r
1172 delay[bl_i] -= FULL_CLK;\r
1173 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1174 }\r
1175 else\r
1176 {\r
1177 // not enough delay\r
1178 training_message(channel_i, rank_i, bl_i);\r
1179 post_code(0xEE, 0x50);\r
1180 }\r
1181 }\r
1182 } // bl_i loop\r
1183 } while (tempD & 0xFF);\r
1184\r
1185#ifdef R2R_SHARING\r
1186 // increment "num_ranks_enabled"\r
1187 num_ranks_enabled++;\r
1188 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.\r
1189 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1190 {\r
1191 delay[bl_i] += QRTR_CLK;\r
1192 // add "delay[]" values to "final_delay[][]" for rolling average\r
1193 final_delay[channel_i][bl_i] += delay[bl_i];\r
1194 // set timing based on rolling average values\r
1195 set_rcvn(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1196 } // bl_i loop\r
1197#else\r
1198 // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.\r
1199 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1200 {\r
1201 delay[bl_i] += QRTR_CLK;\r
1202 set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);\r
1203 } // bl_i loop\r
1204\r
1205#endif // R2R_SHARING\r
1206\r
1207 // disable FIFORST\r
1208 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)\r
1209 {\r
1210 isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), BIT8,\r
1211 BIT8); // 1 is disabled\r
1212 } // bl_i loop\r
1213\r
1214#endif // BACKUP_RCVN\r
1215\r
1216 } // if rank is enabled\r
1217 } // rank_i loop\r
1218 } // if channel is enabled\r
1219 } // channel_i loop\r
1220\r
1221#ifndef BACKUP_RCVN\r
1222 // restore original\r
1223 isbW32m(MCU, DTR1, dtr1save.raw);\r
1224#endif\r
1225\r
1226#ifdef MRC_SV\r
1227 if (mrc_params->tune_rcvn)\r
1228 {\r
1229 uint32_t rcven, val;\r
1230 uint32_t rdcmd2rcven;\r
1231\r
1232 /*\r
1233 Formulas for RDCMD2DATAVALID & DIFFAMP dynamic timings\r
1234\r
1235 1. Set after RCVEN training\r
1236\r
1237 //Tune RDCMD2DATAVALID\r
1238\r
1239 x80/x84[21:16]\r
1240 MAX OF 2 RANKS : round up (rdcmd2rcven (rcven 1x) + 2x x 2 + PI/128) + 5\r
1241\r
1242 //rdcmd2rcven x80/84[12:8]\r
1243 //rcven 2x x70[23:20] & [11:8]\r
1244\r
1245 //Tune DIFFAMP Timings\r
1246\r
1247 //diffampen launch x88[20:16] & [4:0] -- B01LATCTL1\r
1248 MIN OF 2 RANKS : round down (rcven 1x + 2x x 2 + PI/128) - 1\r
1249\r
1250 //diffampen length x8C/x90 [13:8] -- B0ONDURCTL B1ONDURCTL\r
1251 MAX OF 2 RANKS : roundup (rcven 1x + 2x x 2 + PI/128) + 5\r
1252\r
1253\r
1254 2. need to do a fiforst after setting these values\r
1255 */\r
1256\r
1257 DPF(D_INFO, "BEFORE\n");\r
1258 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));\r
1259 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));\r
1260 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));\r
1261\r
1262 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));\r
1263 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));\r
1264\r
1265 rcven = get_rcvn(0, 0, 0) / 128;\r
1266 rdcmd2rcven = (isbR32m(DDRPHY, B0LATCTL0) >> 8) & 0x1F;\r
1267 val = rdcmd2rcven + rcven + 6;\r
1268 isbM32m(DDRPHY, B0LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));\r
1269\r
1270 val = rdcmd2rcven + rcven - 1;\r
1271 isbM32m(DDRPHY, B01LATCTL1, val << 0, (BIT4|BIT3|BIT2|BIT1|BIT0));\r
1272\r
1273 val = rdcmd2rcven + rcven + 5;\r
1274 isbM32m(DDRPHY, B0ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));\r
1275\r
1276 rcven = get_rcvn(0, 0, 1) / 128;\r
1277 rdcmd2rcven = (isbR32m(DDRPHY, B1LATCTL0) >> 8) & 0x1F;\r
1278 val = rdcmd2rcven + rcven + 6;\r
1279 isbM32m(DDRPHY, B1LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));\r
1280\r
1281 val = rdcmd2rcven + rcven - 1;\r
1282 isbM32m(DDRPHY, B01LATCTL1, val << 16, (BIT20|BIT19|BIT18|BIT17|BIT16));\r
1283\r
1284 val = rdcmd2rcven + rcven + 5;\r
1285 isbM32m(DDRPHY, B1ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));\r
1286\r
1287 DPF(D_INFO, "AFTER\n");\r
1288 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));\r
1289 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));\r
1290 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));\r
1291\r
1292 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));\r
1293 DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));\r
1294\r
1295 DPF(D_INFO, "\nPress a key\n");\r
1296 mgetc();\r
1297\r
1298 // fifo reset\r
1299 isbM32m(DDRPHY, B01PTRCTL1, 0, BIT8); // 0 is enabled\r
1300 delay_n(3);\r
1301 isbM32m(DDRPHY, B01PTRCTL1, BIT8, BIT8); // 1 is disabled\r
1302 }\r
1303#endif\r
1304\r
1305 LEAVEFN();\r
1306 return;\r
1307}\r
1308\r
1309// Check memory by executing write/read/verify of many data patterns\r
1310// at the specified address. A set bit in the result indicates a failure\r
1311// on the corresponding byte lane.\r
1312static uint32_t check_bls_ex(\r
1313 MRCParams_t *mrc_params,\r
1314 uint32_t address)\r
1315{\r
1316 uint32_t result;\r
1317 uint8_t first_run = 0;\r
1318\r
1319 if (mrc_params->hte_setup)\r
1320 {\r
1321 mrc_params->hte_setup = 0;\r
1322\r
1323 first_run = 1;\r
1324 select_hte(mrc_params);\r
1325 }\r
1326\r
1327 result = WriteStressBitLanesHTE(mrc_params, address, first_run);\r
1328\r
1329 DPF(D_TRN, "check_bls_ex result is %x\n", result);\r
1330 return result;\r
1331}\r
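// Usage note (illustrative only): callers treat the return value as a\r
// per-byte-lane failure map, e.g. with a one-bit-per-lane mask a result of\r
// 0x05 would mean byte lanes 0 and 2 failed while the remaining lanes passed.\r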
1332\r
1333// Check memory by executing a simple write/read/verify at\r
1334// the specified address. A set bit in the result indicates a failure\r
1335// on the corresponding byte lane.\r
1336static uint32_t check_rw_coarse(\r
1337 MRCParams_t *mrc_params,\r
1338 uint32_t address)\r
1339{\r
1340 uint32_t result = 0;\r
1341 uint8_t first_run = 0;\r
1342\r
1343 if (mrc_params->hte_setup)\r
1344 {\r
1345 mrc_params->hte_setup = 0;\r
1346\r
1347 first_run = 1;\r
1348 select_hte(mrc_params);\r
1349 }\r
1350\r
1351 result = BasicWriteReadHTE(mrc_params, address, first_run, WRITE_TRAIN);\r
1352\r
1353 DPF(D_TRN, "check_rw_coarse result is %x\n", result);\r
1354 return result;\r
1355}\r
1356\r
1357// wr_level:\r
1358// POST_CODE[major] == 0x06\r
1359//\r
1360// This function will perform the Write Levelling algorithm (align WCLK and WDQS).\r
1361// This algorithm will act on each rank in each channel separately.\r
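// A condensed sketch of the non-BACKUP_WDQS flow implemented below:\r
//   WDQS = WCLK                          (starting point per byte lane)\r
//   find_rising_edge()                   (align WDQS to the WCLK rising edge)\r
//   WDQS += FULL_CLK; WDQ = WDQS - QRTR_CLK\r
//   while any byte lane fails check_rw_coarse():\r
//       WDQS -= FULL_CLK on the failing lanes (WDQ follows at WDQS - QRTR_CLK)\r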
1362static void wr_level(\r
1363 MRCParams_t *mrc_params)\r
1364{\r
1365 uint8_t channel_i; // channel counter\r
1366 uint8_t rank_i; // rank counter\r
1367 uint8_t bl_i; // byte lane counter\r
1368 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1369\r
1370#ifdef R2R_SHARING\r
1371 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1372#ifndef BACKUP_WDQS\r
1373 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1374#endif // BACKUP_WDQS\r
1375#endif // R2R_SHARING\r
1376\r
1377#ifdef BACKUP_WDQS\r
1378#else\r
1379 bool all_edges_found; // determines stop condition for CRS_WR_LVL\r
1380 uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane\r
1381 // static makes the data load into memory once via shadow(), whereas\r
1382 // non-static copies the data onto the stack every time this function is called.\r
1383\r
1384 uint32_t address; // address to be checked during COARSE_WR_LVL\r
1385 RegDTR4 dtr4;\r
1386 RegDTR4 dtr4save;\r
1387#endif // BACKUP_WDQS\r
1388\r
1389 ENTERFN();\r
1390\r
1391 // wr_level starts\r
1392 post_code(0x06, 0x00);\r
1393\r
1394#ifdef R2R_SHARING\r
1395 // need to set "final_delay[][]" elements to "0"\r
1396 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1397#endif // R2R_SHARING\r
1398 // loop through each enabled channel\r
1399 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1400 {\r
1401 if (mrc_params->channel_enables & (1 << channel_i))\r
1402 {\r
1403 // perform WRITE LEVELING algorithm on a per rank basis\r
1404 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1405 {\r
1406 if (mrc_params->rank_enables & (1 << rank_i))\r
1407 {\r
1408 // POST_CODE here indicates the current rank and channel being calibrated\r
1409 post_code(0x06, (0x10 + ((channel_i << 4) | rank_i)));\r
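// e.g. channel 0 / rank 0 reports minor code 0x10 and channel 0 / rank 1 reports 0x11\r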
1410\r
1411#ifdef BACKUP_WDQS\r
1412 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1413 {\r
1414 set_wdqs(channel_i, rank_i, bl_i, ddr_wdqs[PLATFORM_ID]);\r
1415 set_wdq(channel_i, rank_i, bl_i, (ddr_wdqs[PLATFORM_ID] - QRTR_CLK));\r
1416 }\r
1417#else\r
1418\r
1419 { // Begin product specific code\r
1420\r
1421 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1422 dram_init_command(DCMD_PREA(rank_i));\r
1423\r
1424 // enable Write Levelling Mode (EMRS1 w/ Write Levelling Mode Enable)\r
1425 dram_init_command(DCMD_MRS1(rank_i,0x0082));\r
1426\r
1427 // set ODT DRAM Full Time Termination disable in MCU\r
1428 dtr4.raw = dtr4save.raw = isbR32m(MCU, DTR4);\r
1429 dtr4.field.ODTDIS = 1;\r
1430 isbW32m(MCU, DTR4, dtr4.raw);\r
1431\r
1432 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1433 {\r
1434 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1435 (BIT28 | (0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1436 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Enable Sandy Bridge Mode (WDQ Tri-State) & Ensure 5 WDQS pulses during Write Leveling\r
1437 }\r
1438\r
1439 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (BIT16), (BIT16)); // Write Leveling Mode enabled in IO\r
1440 } // End product specific code\r
1441 // Initialise the starting delay to WCLK\r
1442 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1443 {\r
1444 { // Begin product specific code\r
1445 // CLK0 --> RK0\r
1446 // CLK1 --> RK1\r
1447 delay[bl_i] = get_wclk(channel_i, rank_i);\r
1448 } // End product specific code\r
1449 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1450 } // bl_i loop\r
1451 // now find the rising edge\r
1452 find_rising_edge(mrc_params, delay, channel_i, rank_i, false);\r
1453 { // Begin product specific code\r
1454 // disable Write Levelling Mode\r
1455 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (0), (BIT16)); // Write Leveling Mode disabled in IO\r
1456\r
1457 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1458 {\r
1459 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1460 ((0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1461 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation\r
1462 } // bl_i loop\r
1463\r
1464 // restore original DTR4\r
1465 isbW32m(MCU, DTR4, dtr4save.raw);\r
1466\r
1467 // restore original value (Write Levelling Mode Disable)\r
1468 dram_init_command(DCMD_MRS1(rank_i, mrc_params->mrs1));\r
1469\r
1470 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1471 dram_init_command(DCMD_PREA(rank_i));\r
1472 } // End product specific code\r
1473\r
1474 post_code(0x06, (0x30 + ((channel_i << 4) | rank_i)));\r
1475\r
1476 // COARSE WRITE LEVEL:\r
1477 // check that we're on the correct clock edge\r
1478\r
1479 // hte reconfiguration request\r
1480 mrc_params->hte_setup = 1;\r
1481\r
1482 // start CRS_WR_LVL with WDQS = WDQS + 128 PI\r
1483 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1484 {\r
1485 delay[bl_i] = get_wdqs(channel_i, rank_i, bl_i) + FULL_CLK;\r
1486 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1487 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1488 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1489 } // bl_i loop\r
1490\r
1491 // get an address in the targeted channel/rank\r
1492 address = get_addr(mrc_params, channel_i, rank_i);\r
1493 do\r
1494 {\r
1495 uint32_t coarse_result = 0x00;\r
1496 uint32_t coarse_result_mask = byte_lane_mask(mrc_params);\r
1497 all_edges_found = true; // assume pass\r
1498\r
1499#ifdef SIM\r
1500 // need to restore memory to the idle state as the write can be out of sync\r
1501 dram_init_command (DCMD_PREA(rank_i));\r
1502#endif\r
1503\r
1504 mrc_params->hte_setup = 1;\r
1505 coarse_result = check_rw_coarse(mrc_params, address);\r
1506\r
1507 // check for failures and margin the byte lane back 128 PI (1 CLK)\r
1508 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1509 {\r
1510 if (coarse_result & (coarse_result_mask << bl_i))\r
1511 {\r
1512 all_edges_found = false;\r
1513 delay[bl_i] -= FULL_CLK;\r
1514 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1515 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1516 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1517 }\r
1518 } // bl_i loop\r
1519\r
1520 } while (!all_edges_found);\r
1521\r
1522#ifdef R2R_SHARING\r
1523 // increment "num_ranks_enabled"\r
1524 num_ranks_enabled++;\r
1525 // accumulate "final_delay[][]" values from "delay[]" values for rolling average\r
1526 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1527 {\r
1528 final_delay[channel_i][bl_i] += delay[bl_i];\r
1529 set_wdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1530 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1531 set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled) - QRTR_CLK);\r
1532 } // bl_i loop\r
1533#endif // R2R_SHARING\r
1534#endif // BACKUP_WDQS\r
1535\r
1536 } // if rank is enabled\r
1537 } // rank_i loop\r
1538 } // if channel is enabled\r
1539 } // channel_i loop\r
1540\r
1541 LEAVEFN();\r
1542 return;\r
1543}\r
1544\r
1545// rd_train:\r
1546// POST_CODE[major] == 0x07\r
1547//\r
1548// This function will perform the READ TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.\r
1549// The idea here is to train the VREF and RDQS (and eventually RDQ) values to achieve maximum READ margins.\r
1550// The algorithm will first determine the X coordinate (RDQS setting).\r
1551// This is done by collapsing the VREF eye until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.\r
1552// Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX, then average those; this will be the final X coordinate.\r
1553// The algorithm will then determine the Y coordinate (VREF setting).\r
1554// This is done by collapsing the RDQS eye until we find a minimum required VREF eye for RDQS_MIN and RDQS_MAX.\r
1555// Then we take the averages of the VREF eye at RDQS_MIN and RDQS_MAX, then average those; this will be the final Y coordinate.\r
1556// NOTE: this algorithm assumes the eye curves have a one-to-one relationship, meaning for each X the curve has only one Y and vice versa.\r
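// A worked centering example (numbers are illustrative only): if a byte lane's\r
// passing RDQS window is 16..48 at VREF_MAX (TOP) and 20..44 at VREF_MIN (BOTTOM),\r
// then x_center = ((16 + 48) / 2 + (20 + 44) / 2) / 2 = (32 + 32) / 2 = 32;\r
// y_center is computed the same way from the TOP/BOTTOM passing VREF values\r
// found at the LEFT and RIGHT RDQS edges.\r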
1557static void rd_train(\r
1558 MRCParams_t *mrc_params)\r
1559{\r
1560\r
1561#define MIN_RDQS_EYE 10 // in PI Codes\r
1562#define MIN_VREF_EYE 10 // in VREF Codes\r
1563#define RDQS_STEP 1 // how many RDQS codes to jump while margining\r
1564#define VREF_STEP 1 // how many VREF codes to jump while margining\r
1565#define VREF_MIN (0x00) // offset into "vref_codes[]" for minimum allowed VREF setting\r
1566#define VREF_MAX (0x3F) // offset into "vref_codes[]" for maximum allowed VREF setting\r
1567#define RDQS_MIN (0x00) // minimum RDQS delay value\r
1568#define RDQS_MAX (0x3F) // maximum RDQS delay value\r
1569#define B 0 // BOTTOM VREF\r
1570#define T 1 // TOP VREF\r
1571#define L 0 // LEFT RDQS\r
1572#define R 1 // RIGHT RDQS\r
1573\r
1574 uint8_t channel_i; // channel counter\r
1575 uint8_t rank_i; // rank counter\r
1576 uint8_t bl_i; // byte lane counter\r
1577 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1578#ifdef BACKUP_RDQS\r
1579#else\r
1580 uint8_t side_x; // tracks LEFT/RIGHT approach vectors\r
1581 uint8_t side_y; // tracks BOTTOM/TOP approach vectors\r
1582 uint8_t x_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // X coordinate data (passing RDQS values) for approach vectors\r
1583 uint8_t y_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_BYTE_LANES]; // Y coordinate data (passing VREF values) for approach vectors\r
1584 uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // centered X (RDQS)\r
1585 uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES]; // centered Y (VREF)\r
1586 uint32_t address; // target address for "check_bls_ex()"\r
1587 uint32_t result; // result of "check_bls_ex()"\r
1588 uint32_t bl_mask; // byte lane mask for "result" checking\r
1589#ifdef R2R_SHARING\r
1590 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1591 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1592#endif // R2R_SHARING\r
1593#endif // BACKUP_RDQS\r
1594 // rd_train starts\r
1595 post_code(0x07, 0x00);\r
1596\r
1597 ENTERFN();\r
1598\r
1599#ifdef BACKUP_RDQS\r
1600 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1601 {\r
1602 if (mrc_params->channel_enables & (1<<channel_i))\r
1603 {\r
1604 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1605 {\r
1606 if (mrc_params->rank_enables & (1<<rank_i))\r
1607 {\r
1608 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1609 {\r
1610 set_rdqs(channel_i, rank_i, bl_i, ddr_rdqs[PLATFORM_ID]);\r
1611 } // bl_i loop\r
1612 } // if rank is enabled\r
1613 } // rank_i loop\r
1614 } // if channel is enabled\r
1615 } // channel_i loop\r
1616#else\r
1617 // initialise x/y_coordinate arrays\r
1618 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1619 {\r
1620 if (mrc_params->channel_enables & (1 << channel_i))\r
1621 {\r
1622 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1623 {\r
1624 if (mrc_params->rank_enables & (1 << rank_i))\r
1625 {\r
1626 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1627 {\r
1628 // x_coordinate:\r
1629 x_coordinate[L][B][channel_i][rank_i][bl_i] = RDQS_MIN;\r
1630 x_coordinate[R][B][channel_i][rank_i][bl_i] = RDQS_MAX;\r
1631 x_coordinate[L][T][channel_i][rank_i][bl_i] = RDQS_MIN;\r
1632 x_coordinate[R][T][channel_i][rank_i][bl_i] = RDQS_MAX;\r
1633 // y_coordinate:\r
1634 y_coordinate[L][B][channel_i][bl_i] = VREF_MIN;\r
1635 y_coordinate[R][B][channel_i][bl_i] = VREF_MIN;\r
1636 y_coordinate[L][T][channel_i][bl_i] = VREF_MAX;\r
1637 y_coordinate[R][T][channel_i][bl_i] = VREF_MAX;\r
1638 } // bl_i loop\r
1639 } // if rank is enabled\r
1640 } // rank_i loop\r
1641 } // if channel is enabled\r
1642 } // channel_i loop\r
1643\r
1644 // initialise other variables\r
1645 bl_mask = byte_lane_mask(mrc_params);\r
1646 address = get_addr(mrc_params, 0, 0);\r
1647\r
1648#ifdef R2R_SHARING\r
1649 // need to set "final_delay[][]" elements to "0"\r
1650 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1651#endif // R2R_SHARING\r
1652\r
1653 // look for passing coordinates\r
1654 for (side_y = B; side_y <= T; side_y++)\r
1655 {\r
1656 for (side_x = L; side_x <= R; side_x++)\r
1657 {\r
1658\r
1659 post_code(0x07, (0x10 + (side_y * 2) + (side_x)));\r
1660\r
1661 // find passing values\r
1662 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1663 {\r
1664 if (mrc_params->channel_enables & (0x1 << channel_i))\r
1665 {\r
1666 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1667 {\r
1668\r
1669 if (mrc_params->rank_enables & (0x1 << rank_i))\r
1670 {\r
1671 // set x/y_coordinate search starting settings\r
1672 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1673 {\r
1674 set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);\r
1675 set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);\r
1676 } // bl_i loop\r
1677 // get an address in the target channel/rank\r
1678 address = get_addr(mrc_params, channel_i, rank_i);\r
1679\r
1680 // request HTE reconfiguration\r
1681 mrc_params->hte_setup = 1;\r
1682\r
1683 // test the settings\r
1684 do\r
1685 {\r
1686\r
1687 // result[07:00] == failing byte lane (MAX 8)\r
1688 result = check_bls_ex( mrc_params, address);\r
1689\r
1690 // check for failures\r
1691 if (result & 0xFF)\r
1692 {\r
1693 // at least 1 byte lane failed\r
1694 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1695 {\r
1696 if (result & (bl_mask << bl_i))\r
1697 {\r
1698 // adjust the RDQS values accordingly\r
1699 if (side_x == L)\r
1700 {\r
1701 x_coordinate[L][side_y][channel_i][rank_i][bl_i] += RDQS_STEP;\r
1702 }\r
1703 else\r
1704 {\r
1705 x_coordinate[R][side_y][channel_i][rank_i][bl_i] -= RDQS_STEP;\r
1706 }\r
1707 // check that we haven't closed the RDQS_EYE too much\r
1708 if ((x_coordinate[L][side_y][channel_i][rank_i][bl_i] > (RDQS_MAX - MIN_RDQS_EYE)) ||\r
1709 (x_coordinate[R][side_y][channel_i][rank_i][bl_i] < (RDQS_MIN + MIN_RDQS_EYE))\r
1710 ||\r
1711 (x_coordinate[L][side_y][channel_i][rank_i][bl_i]\r
1712 == x_coordinate[R][side_y][channel_i][rank_i][bl_i]))\r
1713 {\r
1714 // not enough RDQS margin available at this VREF\r
1715 // update VREF values accordingly\r
1716 if (side_y == B)\r
1717 {\r
1718 y_coordinate[side_x][B][channel_i][bl_i] += VREF_STEP;\r
1719 }\r
1720 else\r
1721 {\r
1722 y_coordinate[side_x][T][channel_i][bl_i] -= VREF_STEP;\r
1723 }\r
1724 // check that we haven't closed the VREF_EYE too much\r
1725 if ((y_coordinate[side_x][B][channel_i][bl_i] > (VREF_MAX - MIN_VREF_EYE)) ||\r
1726 (y_coordinate[side_x][T][channel_i][bl_i] < (VREF_MIN + MIN_VREF_EYE)) ||\r
1727 (y_coordinate[side_x][B][channel_i][bl_i] == y_coordinate[side_x][T][channel_i][bl_i]))\r
1728 {\r
1729 // VREF_EYE collapsed below MIN_VREF_EYE\r
1730 training_message(channel_i, rank_i, bl_i);\r
1731 post_code(0xEE, (0x70 + (side_y * 2) + (side_x)));\r
1732 }\r
1733 else\r
1734 {\r
1735 // update the VREF setting\r
1736 set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);\r
1737 // reset the X coordinate to begin the search at the new VREF\r
1738 x_coordinate[side_x][side_y][channel_i][rank_i][bl_i] =\r
1739 (side_x == L) ? (RDQS_MIN) : (RDQS_MAX);\r
1740 }\r
1741 }\r
1742 // update the RDQS setting\r
1743 set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);\r
1744 } // if bl_i failed\r
1745 } // bl_i loop\r
1746 } // at least 1 byte lane failed\r
1747 } while (result & 0xFF);\r
1748 } // if rank is enabled\r
1749 } // rank_i loop\r
1750 } // if channel is enabled\r
1751 } // channel_i loop\r
1752 } // side_x loop\r
1753 } // side_y loop\r
1754\r
1755 post_code(0x07, 0x20);\r
1756\r
1757 // find final RDQS (X coordinate) & final VREF (Y coordinate)\r
1758 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1759 {\r
1760 if (mrc_params->channel_enables & (1 << channel_i))\r
1761 {\r
1762 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1763 {\r
1764 if (mrc_params->rank_enables & (1 << rank_i))\r
1765 {\r
1766 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1767 {\r
1768 uint32_t tempD1;\r
1769 uint32_t tempD2;\r
1770\r
1771 // x_coordinate:\r
1772 DPF(D_INFO, "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n", rank_i, bl_i,\r
1773 x_coordinate[L][T][channel_i][rank_i][bl_i],\r
1774 x_coordinate[R][T][channel_i][rank_i][bl_i],\r
1775 x_coordinate[L][B][channel_i][rank_i][bl_i],\r
1776 x_coordinate[R][B][channel_i][rank_i][bl_i]);\r
1777\r
1778 tempD1 = (x_coordinate[R][T][channel_i][rank_i][bl_i] + x_coordinate[L][T][channel_i][rank_i][bl_i]) / 2; // average the TOP side LEFT & RIGHT values\r
1779 tempD2 = (x_coordinate[R][B][channel_i][rank_i][bl_i] + x_coordinate[L][B][channel_i][rank_i][bl_i]) / 2; // average the BOTTOM side LEFT & RIGHT values\r
1780 x_center[channel_i][rank_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages\r
1781\r
1782 // y_coordinate:\r
1783 DPF(D_INFO, "VREF R/L eye lane%d : %d-%d %d-%d\n", bl_i,\r
1784 y_coordinate[R][B][channel_i][bl_i],\r
1785 y_coordinate[R][T][channel_i][bl_i],\r
1786 y_coordinate[L][B][channel_i][bl_i],\r
1787 y_coordinate[L][T][channel_i][bl_i]);\r
1788\r
1789 tempD1 = (y_coordinate[R][T][channel_i][bl_i] + y_coordinate[R][B][channel_i][bl_i]) / 2; // average the RIGHT side TOP & BOTTOM values\r
1790 tempD2 = (y_coordinate[L][T][channel_i][bl_i] + y_coordinate[L][B][channel_i][bl_i]) / 2; // average the LEFT side TOP & BOTTOM values\r
1791 y_center[channel_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages\r
1792 } // bl_i loop\r
1793 } // if rank is enabled\r
1794 } // rank_i loop\r
1795 } // if channel is enabled\r
1796 } // channel_i loop\r
1797\r
1798#ifdef RX_EYE_CHECK\r
1799 // perform an eye check\r
1800 for (side_y=B; side_y<=T; side_y++)\r
1801 {\r
1802 for (side_x=L; side_x<=R; side_x++)\r
1803 {\r
1804\r
1805 post_code(0x07, (0x30 + (side_y * 2) + (side_x)));\r
1806\r
1807 // update the settings for the eye check\r
1808 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1809 {\r
1810 if (mrc_params->channel_enables & (1<<channel_i))\r
1811 {\r
1812 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1813 {\r
1814 if (mrc_params->rank_enables & (1<<rank_i))\r
1815 {\r
1816 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1817 {\r
1818 if (side_x == L)\r
1819 {\r
1820 set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] - (MIN_RDQS_EYE / 2)));\r
1821 }\r
1822 else\r
1823 {\r
1824 set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] + (MIN_RDQS_EYE / 2)));\r
1825 }\r
1826 if (side_y == B)\r
1827 {\r
1828 set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] - (MIN_VREF_EYE / 2)));\r
1829 }\r
1830 else\r
1831 {\r
1832 set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] + (MIN_VREF_EYE / 2)));\r
1833 }\r
1834 } // bl_i loop\r
1835 } // if rank is enabled\r
1836 } // rank_i loop\r
1837 } // if channel is enabled\r
1838 } // channel_i loop\r
1839\r
1840 // request HTE reconfiguration\r
1841 mrc_params->hte_setup = 1;\r
1842\r
1843 // check the eye\r
1844 if (check_bls_ex( mrc_params, address) & 0xFF)\r
1845 {\r
1846 // one or more byte lanes failed\r
1847 post_code(0xEE, (0x74 + (side_x * 2) + (side_y)));\r
1848 }\r
1849 } // side_x loop\r
1850 } // side_y loop\r
1851#endif // RX_EYE_CHECK\r
1852\r
1853 post_code(0x07, 0x40);\r
1854\r
1855 // set final placements\r
1856 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1857 {\r
1858 if (mrc_params->channel_enables & (1 << channel_i))\r
1859 {\r
1860 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1861 {\r
1862 if (mrc_params->rank_enables & (1 << rank_i))\r
1863 {\r
1864#ifdef R2R_SHARING\r
1865 // increment "num_ranks_enabled"\r
1866 num_ranks_enabled++;\r
1867#endif // R2R_SHARING\r
1868 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1869 {\r
1870 // x_coordinate:\r
1871#ifdef R2R_SHARING\r
1872 final_delay[channel_i][bl_i] += x_center[channel_i][rank_i][bl_i];\r
1873 set_rdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1874#else\r
1875 set_rdqs(channel_i, rank_i, bl_i, x_center[channel_i][rank_i][bl_i]);\r
1876#endif // R2R_SHARING\r
1877 // y_coordinate:\r
1878 set_vref(channel_i, bl_i, y_center[channel_i][bl_i]);\r
1879 } // bl_i loop\r
1880 } // if rank is enabled\r
1881 } // rank_i loop\r
1882 } // if channel is enabled\r
1883 } // channel_i loop\r
1884#endif // BACKUP_RDQS\r
1885 LEAVEFN();\r
1886 return;\r
1887}\r
1888\r
1889// wr_train:\r
1890// POST_CODE[major] == 0x08\r
1891//\r
1892// This function will perform the WRITE TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.\r
1893// The idea here is to train the WDQ timings to achieve maximum WRITE margins.\r
1894// The algorithm will start with WDQ at the current WDQ setting (tracks WDQS in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data patterns pass.\r
1895// This is because WDQS will be aligned to WCLK by the Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window of validity.\r
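// A worked example of the WDQ search below (numbers are illustrative only): with\r
// WDQS trained to 320 PI the search starts from WDQ = 320 - 32 = 288 PI and sweeps\r
// the window 256..320 PI, i.e. (WDQS - QRTR_CLK) +/- QRTR_CLK; if the LEFT edge\r
// settles at 262 PI and the RIGHT edge at 310 PI, the final WDQ placement is\r
// (262 + 310) / 2 = 286 PI.\r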
1896static void wr_train(\r
1897 MRCParams_t *mrc_params)\r
1898{\r
1899\r
1900#define WDQ_STEP 1 // how many WDQ codes to jump while margining\r
1901#define L 0 // LEFT side loop value definition\r
1902#define R 1 // RIGHT side loop value definition\r
1903\r
1904 uint8_t channel_i; // channel counter\r
1905 uint8_t rank_i; // rank counter\r
1906 uint8_t bl_i; // byte lane counter\r
1907 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1908#ifdef BACKUP_WDQ\r
1909#else\r
1910 uint8_t side_i; // LEFT/RIGHT side indicator (0=L, 1=R)\r
1911 uint32_t tempD; // temporary DWORD\r
1912 uint32_t delay[2/*side_i*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // 2 arrays, for L & R side passing delays\r
1913 uint32_t address; // target address for "check_bls_ex()"\r
1914 uint32_t result; // result of "check_bls_ex()"\r
1915 uint32_t bl_mask; // byte lane mask for "result" checking\r
1916#ifdef R2R_SHARING\r
1917 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1918 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1919#endif // R2R_SHARING\r
1920#endif // BACKUP_WDQ\r
1921\r
1922 // wr_train starts\r
1923 post_code(0x08, 0x00);\r
1924\r
1925 ENTERFN();\r
1926\r
1927#ifdef BACKUP_WDQ\r
1928 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)\r
1929 {\r
1930 if (mrc_params->channel_enables & (1<<channel_i))\r
1931 {\r
1932 for (rank_i=0; rank_i<NUM_RANKS; rank_i++)\r
1933 {\r
1934 if (mrc_params->rank_enables & (1<<rank_i))\r
1935 {\r
1936 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1937 {\r
1938 set_wdq(channel_i, rank_i, bl_i, ddr_wdq[PLATFORM_ID]);\r
1939 } // bl_i loop\r
1940 } // if rank is enabled\r
1941 } // rank_i loop\r
1942 } // if channel is enabled\r
1943 } // channel_i loop\r
1944#else\r
1945 // initialise "delay"\r
1946 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1947 {\r
1948 if (mrc_params->channel_enables & (1 << channel_i))\r
1949 {\r
1950 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1951 {\r
1952 if (mrc_params->rank_enables & (1 << rank_i))\r
1953 {\r
1954 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1955 {\r
1956 // want to start with WDQ = (WDQS - QRTR_CLK) +/- QRTR_CLK\r
1957 tempD = get_wdqs(channel_i, rank_i, bl_i) - QRTR_CLK;\r
1958 delay[L][channel_i][rank_i][bl_i] = tempD - QRTR_CLK;\r
1959 delay[R][channel_i][rank_i][bl_i] = tempD + QRTR_CLK;\r
1960 } // bl_i loop\r
1961 } // if rank is enabled\r
1962 } // rank_i loop\r
1963 } // if channel is enabled\r
1964 } // channel_i loop\r
1965\r
1966 // initialise other variables\r
1967 bl_mask = byte_lane_mask(mrc_params);\r
1968 address = get_addr(mrc_params, 0, 0);\r
1969\r
1970#ifdef R2R_SHARING\r
1971 // need to set "final_delay[][]" elements to "0"\r
1972 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1973#endif // R2R_SHARING\r
1974\r
1975 // start algorithm on the LEFT side and train each channel/bl until no failures are observed, then repeat for the RIGHT side.\r
1976 for (side_i = L; side_i <= R; side_i++)\r
1977 {\r
1978 post_code(0x08, (0x10 + (side_i)));\r
1979\r
1980 // set starting values\r
1981 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1982 {\r
1983 if (mrc_params->channel_enables & (1 << channel_i))\r
1984 {\r
1985 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1986 {\r
1987 if (mrc_params->rank_enables & (1 << rank_i))\r
1988 {\r
1989 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1990 {\r
1991 set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);\r
1992 } // bl_i loop\r
1993 } // if rank is enabled\r
1994 } // rank_i loop\r
1995 } // if channel is enabled\r
1996 } // channel_i loop\r
1997\r
1998 // find passing values\r
1999 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2000 {\r
2001 if (mrc_params->channel_enables & (0x1 << channel_i))\r
2002 {\r
2003 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2004 {\r
2005 if (mrc_params->rank_enables & (0x1 << rank_i))\r
2006 {\r
2007 // get an address in the target channel/rank\r
2008 address = get_addr(mrc_params, channel_i, rank_i);\r
2009\r
2010 // request HTE reconfiguration\r
2011 mrc_params->hte_setup = 1;\r
2012\r
2013 // check the settings\r
2014 do\r
2015 {\r
2016\r
2017#ifdef SIM\r
2018 // need to restore memory to the idle state as the write can be out of sync\r
2019 dram_init_command (DCMD_PREA(rank_i));\r
2020#endif\r
2021\r
2022 // result[07:00] == failing byte lane (MAX 8)\r
2023 result = check_bls_ex( mrc_params, address);\r
2024 // check for failures\r
2025 if (result & 0xFF)\r
2026 {\r
2027 // at least 1 byte lane failed\r
2028 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
2029 {\r
2030 if (result & (bl_mask << bl_i))\r
2031 {\r
2032 if (side_i == L)\r
2033 {\r
2034 delay[L][channel_i][rank_i][bl_i] += WDQ_STEP;\r
2035 }\r
2036 else\r
2037 {\r
2038 delay[R][channel_i][rank_i][bl_i] -= WDQ_STEP;\r
2039 }\r
2040 // check for algorithm failure\r
2041 if (delay[L][channel_i][rank_i][bl_i] != delay[R][channel_i][rank_i][bl_i])\r
2042 {\r
2043 // margin available, update delay setting\r
2044 set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);\r
2045 }\r
2046 else\r
2047 {\r
2048 // no margin available, notify the user and halt\r
2049 training_message(channel_i, rank_i, bl_i);\r
2050 post_code(0xEE, (0x80 + side_i));\r
2051 }\r
2052 } // if bl_i failed\r
2053 } // bl_i loop\r
2054 } // at least 1 byte lane failed\r
2055 } while (result & 0xFF); // stop when all byte lanes pass\r
2056 } // if rank is enabled\r
2057 } // rank_i loop\r
2058 } // if channel is enabled\r
2059 } // channel_i loop\r
2060 } // side_i loop\r
2061\r
2062 // program WDQ to the middle of passing window\r
2063 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2064 {\r
2065 if (mrc_params->channel_enables & (1 << channel_i))\r
2066 {\r
2067 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2068 {\r
2069 if (mrc_params->rank_enables & (1 << rank_i))\r
2070 {\r
2071#ifdef R2R_SHARING\r
2072 // increment "num_ranks_enabled"\r
2073 num_ranks_enabled++;\r
2074#endif // R2R_SHARING\r
2075 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
2076 {\r
2077\r
2078 DPF(D_INFO, "WDQ eye rank%d lane%d : %d-%d\n", rank_i, bl_i,\r
2079 delay[L][channel_i][rank_i][bl_i],\r
2080 delay[R][channel_i][rank_i][bl_i]);\r
2081\r
2082 tempD = (delay[R][channel_i][rank_i][bl_i] + delay[L][channel_i][rank_i][bl_i]) / 2;\r
2083\r
2084#ifdef R2R_SHARING\r
2085 final_delay[channel_i][bl_i] += tempD;\r
2086 set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
2087#else\r
2088 set_wdq(channel_i, rank_i, bl_i, tempD);\r
2089#endif // R2R_SHARING\r
2090\r
2091 } // bl_i loop\r
2092 } // if rank is enabled\r
2093 } // rank_i loop\r
2094 } // if channel is enabled\r
2095 } // channel_i loop\r
2096#endif // BACKUP_WDQ\r
2097 LEAVEFN();\r
2098 return;\r
2099}\r
2100\r
2101// Wrapper for the JEDEC initialisation routine\r
2102static void perform_jedec_init(\r
2103 MRCParams_t *mrc_params)\r
2104{\r
2105 jedec_init(mrc_params, 0);\r
2106}\r
2107\r
2108// Configure DDRPHY for Auto-Refresh, Periodic Compensations,\r
2109// Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down\r
2110static void set_auto_refresh(\r
2111 MRCParams_t *mrc_params)\r
2112{\r
2113 uint32_t channel_i;\r
2114 uint32_t rank_i;\r
2115 uint32_t bl_i;\r
2116 uint32_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1;\r
2117 uint32_t tempD;\r
2118\r
2119 ENTERFN();\r
2120\r
2121 // enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down\r
2122 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
2123 {\r
2124 if (mrc_params->channel_enables & (1 << channel_i))\r
2125 {\r
2126 // Enable Periodic RCOMPS\r
2127 isbM32m(DDRPHY, CMPCTRL, (BIT1), (BIT1));\r
2128\r
2129\r
2130 // Enable Dynamic DiffAmp & Set Read ODT Value\r
2131 switch (mrc_params->rd_odt_value)\r
2132 {\r
2133 case 0: tempD = 0x3F; break; // OFF\r
2134 default: tempD = 0x00; break; // Auto\r
2135 } // rd_odt_value switch\r
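// e.g. rd_odt_value == 0 writes 0x3F (OFF) into the ODT override field, i.e.\r
// (0x3F << 10) into bits [15:10]; any other value leaves the field at 0x00 (Auto).\r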
2136\r
2137 for (bl_i=0; bl_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_i++)\r
2138 {\r
2139 isbM32m(DDRPHY, (B0OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),\r
2140 ((0x00<<16)|(tempD<<10)),\r
2141 ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
2142\r
2143 isbM32m(DDRPHY, (B1OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),\r
2144 ((0x00<<16)|(tempD<<10)),\r
2145 ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10)));// Override: DIFFAMP, ODT\r
2146 } // bl_i loop\r
2147\r
2148 // Issue ZQCS command\r
2149 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
2150 {\r
2151 if (mrc_params->rank_enables & (1 << rank_i))\r
2152 {\r
2153 dram_init_command(DCMD_ZQCS(rank_i));\r
2154 } // if rank_i enabled\r
2155 } // rank_i loop\r
2156\r
2157 } // if channel_i enabled\r
2158 } // channel_i loop\r
2159\r
2160 clear_pointers();\r
2161\r
2162 LEAVEFN();\r
2163 return;\r
2164}\r
2165\r
2166// Depending on configuration, enables ECC support.\r
2167// Available memory size is decreased, and memory is initialised with 0s\r
2168// in order to clear the error status. Address mode 2 is forced.\r
2169static void ecc_enable(\r
2170 MRCParams_t *mrc_params)\r
2171{\r
2172 RegDRP Drp;\r
2173 RegDSCH Dsch;\r
2174 RegDECCCTRL Ctr;\r
2175\r
2176 if (mrc_params->ecc_enables == 0) return;\r
2177\r
2178 ENTERFN();\r
2179\r
2180 // Configuration required in ECC mode\r
2181 Drp.raw = isbR32m(MCU, DRP);\r
2182 Drp.field.addressMap = 2;\r
2183 Drp.field.split64 = 1;\r
2184 isbW32m(MCU, DRP, Drp.raw);\r
2185\r
2186 // Disable new request bypass\r
2187 Dsch.raw = isbR32m(MCU, DSCH);\r
2188 Dsch.field.NEWBYPDIS = 1;\r
2189 isbW32m(MCU, DSCH, Dsch.raw);\r
2190\r
2191 // Enable ECC\r
2192 Ctr.raw = 0;\r
2193 Ctr.field.SBEEN = 1;\r
2194 Ctr.field.DBEEN = 1;\r
2195 Ctr.field.ENCBGEN = 1;\r
2196 isbW32m(MCU, DECCCTRL, Ctr.raw);\r
2197\r
2198#ifdef SIM\r
2199 // Read back to be sure writing took place\r
2200 Ctr.raw = isbR32m(MCU, DECCCTRL);\r
2201#endif\r
2202\r
2203 // Assume 8 bank memory, one bank is gone for ECC\r
2204 mrc_params->mem_size -= mrc_params->mem_size / 8;\r
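// e.g. 1024 MiB of DRAM leaves 1024 - (1024 / 8) = 896 MiB available with ECC enabled\r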
2205\r
2206 // For S3 resume memory content has to be preserved\r
2207 if (mrc_params->boot_mode != bmS3)\r
2208 {\r
2209 select_hte(mrc_params);\r
2210 HteMemInit(mrc_params, MrcMemInit, MrcHaltHteEngineOnError);\r
2211 select_memory_manager(mrc_params);\r
2212 }\r
2213\r
2214 LEAVEFN();\r
2215 return;\r
2216}\r
2217\r
2218// Lock MCU registers at the end of the initialisation sequence.\r
2219static void lock_registers(\r
2220 MRCParams_t *mrc_params)\r
2221{\r
2222 RegDCO Dco;\r
2223\r
2224 ENTERFN();\r
2225\r
2226 Dco.raw = isbR32m(MCU, DCO);\r
2227 Dco.field.PMIDIS = 0; //0 - PRI enabled\r
2228 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
2229 Dco.field.DRPLOCK = 1;\r
2230 Dco.field.REUTLOCK = 1;\r
2231 isbW32m(MCU, DCO, Dco.raw);\r
2232\r
2233 LEAVEFN();\r
2234\r
2235}\r
2236\r
2237#ifdef MRC_SV\r
2238\r
2239// cache write back invalidate\r
2240static void asm_wbinvd(void)\r
2241{\r
2242#if defined (SIM) || defined (GCC)\r
2243 asm(\r
2244 "wbinvd;"\r
2245 );\r
2246#else\r
2247 __asm wbinvd;\r
2248#endif\r
2249}\r
2250\r
2251// cache invalidate\r
2252static void asm_invd(void)\r
2253{\r
2254#if defined (SIM) || defined (GCC)\r
2255 asm(\r
2256 "invd;"\r
2257 );\r
2258#else\r
2259 __asm invd;\r
2260#endif\r
2261}\r
2262\r
2263\r
2264static void cpu_read(void)\r
2265{\r
2266 uint32_t adr, dat, limit;\r
2267\r
2268 asm_invd();\r
2269\r
2270 limit = 8 * 1024;\r
2271 for (adr = 0; adr < limit; adr += 4)\r
2272 {\r
2273 dat = *(uint32_t*) adr;\r
2274 if ((adr & 0x0F) == 0)\r
2275 {\r
2276 DPF(D_INFO, "\n%x : ", adr);\r
2277 }\r
2278 DPF(D_INFO, "%x ", dat);\r
2279 }\r
2280 DPF(D_INFO, "\n");\r
2281\r
2282 DPF(D_INFO, "CPU read done\n");\r
2283}\r
2284\r
2285\r
2286static void cpu_write(void)\r
2287{\r
2288 uint32_t adr, limit;\r
2289\r
2290 limit = 8 * 1024;\r
2291 for (adr = 0; adr < limit; adr += 4)\r
2292 {\r
2293 *(uint32_t*) adr = 0xDEAD0000 + adr;\r
2294 }\r
2295\r
2296 asm_wbinvd();\r
2297\r
2298 DPF(D_INFO, "CPU write done\n");\r
2299}\r
2300\r
2301\r
2302static void cpu_memory_test(\r
2303 MRCParams_t *mrc_params)\r
2304{\r
2305 uint32_t result = 0;\r
2306 uint32_t val, dat, adr, adr0, step, limit;\r
2307 uint64_t my_tsc;\r
2308\r
2309 ENTERFN();\r
2310\r
2311 asm_invd();\r
2312\r
2313 adr0 = 1 * 1024 * 1024;\r
2314 limit = 256 * 1024 * 1024;\r
2315\r
2316 for (step = 0; step <= 4; step++)\r
2317 {\r
2318 DPF(D_INFO, "Mem test step %d starting from %xh\n", step, adr0);\r
2319\r
2320 my_tsc = read_tsc();\r
2321 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2322 {\r
2323 if (step == 0) dat = adr;\r
2324 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2325 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2326 else if (step == 3) dat = 0x5555AAAA;\r
2327 else if (step == 4) dat = 0xAAAA5555;\r
2328\r
2329 *(uint32_t*) adr = dat;\r
2330 }\r
2331 DPF(D_INFO, "Write time %llXh\n", read_tsc() - my_tsc);\r
2332\r
2333 my_tsc = read_tsc();\r
2334 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2335 {\r
2336 if (step == 0) dat = adr;\r
2337 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2338 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2339 else if (step == 3) dat = 0x5555AAAA;\r
2340 else if (step == 4) dat = 0xAAAA5555;\r
2341\r
2342 val = *(uint32_t*) adr;\r
2343\r
2344 if (val != dat)\r
2345 {\r
2346 DPF(D_INFO, "%x vs. %x@%x\n", dat, val, adr);\r
2347 result = adr|BIT31;\r
2348 }\r
2349 }\r
2350 DPF(D_INFO, "Read time %llXh\n", read_tsc() - my_tsc);\r
2351 }\r
2352\r
2353 DPF( D_INFO, "Memory test result %x\n", result);\r
2354 LEAVEFN();\r
2355}\r
2356#endif // MRC_SV\r
2357\r
2358\r
2359// Execute the memory test; if an error is detected it is\r
2360// indicated in mrc_params->status.\r
2361static void memory_test(\r
2362 MRCParams_t *mrc_params)\r
2363{\r
2364 uint32_t result = 0;\r
2365\r
2366 ENTERFN();\r
2367\r
2368 select_hte(mrc_params);\r
2369 result = HteMemInit(mrc_params, MrcMemTest, MrcHaltHteEngineOnError);\r
2370 select_memory_manager(mrc_params);\r
2371\r
2372 DPF(D_INFO, "Memory test result %x\n", result);\r
2373 mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);\r
2374 LEAVEFN();\r
2375}\r
2376\r
2377\r
2378// Force same timings as with backup settings\r
2379static void static_timings(\r
2380 MRCParams_t *mrc_params)\r
2381\r
2382{\r
2383 uint8_t ch, rk, bl;\r
2384\r
2385 for (ch = 0; ch < NUM_CHANNELS; ch++)\r
2386 {\r
2387 for (rk = 0; rk < NUM_RANKS; rk++)\r
2388 {\r
2389 for (bl = 0; bl < NUM_BYTE_LANES; bl++)\r
2390 {\r
2391 set_rcvn(ch, rk, bl, 498); // RCVN\r
2392 set_rdqs(ch, rk, bl, 24); // RDQS\r
2393 set_wdqs(ch, rk, bl, 292); // WDQS\r
2394 set_wdq( ch, rk, bl, 260); // WDQ\r
2395 if (rk == 0)\r
2396 {\r
2397 set_vref(ch, bl, 32); // VREF (RANK0 only)\r
2398 }\r
2399 }\r
2400 set_wctl(ch, rk, 217); // WCTL\r
2401 }\r
2402 set_wcmd(ch, 220); // WCMD\r
2403 }\r
2404\r
2405 return;\r
2406}\r
2407\r
2408//\r
2409// Initialise system memory.\r
2410//\r
2411void MemInit(\r
2412 MRCParams_t *mrc_params)\r
2413{\r
2414 static const MemInit_t init[] =\r
2415 {\r
2416 { 0x0101, bmCold|bmFast|bmWarm|bmS3, clear_self_refresh }, //0\r
2417 { 0x0200, bmCold|bmFast|bmWarm|bmS3, prog_ddr_timing_control }, //1 initialise the MCU\r
2418 { 0x0103, bmCold|bmFast , prog_decode_before_jedec }, //2\r
2419 { 0x0104, bmCold|bmFast , perform_ddr_reset }, //3\r
2420 { 0x0300, bmCold|bmFast |bmS3, ddrphy_init }, //4 initialise the DDRPHY\r
2421 { 0x0400, bmCold|bmFast , perform_jedec_init }, //5 perform JEDEC initialisation of DRAMs\r
2422 { 0x0105, bmCold|bmFast , set_ddr_init_complete }, //6\r
2423 { 0x0106, bmFast|bmWarm|bmS3, restore_timings }, //7\r
2424 { 0x0106, bmCold , default_timings }, //8\r
2425 { 0x0500, bmCold , rcvn_cal }, //9 perform RCVN_CAL algorithm\r
2426 { 0x0600, bmCold , wr_level }, //10 perform WR_LEVEL algorithm\r
2427 { 0x0120, bmCold , prog_page_ctrl }, //11\r
2428 { 0x0700, bmCold , rd_train }, //12 perform RD_TRAIN algorithm\r
2429 { 0x0800, bmCold , wr_train }, //13 perform WR_TRAIN algorithm\r
2430 { 0x010B, bmCold , store_timings }, //14\r
2431 { 0x010C, bmCold|bmFast|bmWarm|bmS3, enable_scrambling }, //15\r
2432 { 0x010D, bmCold|bmFast|bmWarm|bmS3, prog_ddr_control }, //16\r
2433 { 0x010E, bmCold|bmFast|bmWarm|bmS3, prog_dra_drb }, //17\r
2434 { 0x010F, bmWarm|bmS3, perform_wake }, //18\r
2435 { 0x0110, bmCold|bmFast|bmWarm|bmS3, change_refresh_period }, //19\r
2436 { 0x0111, bmCold|bmFast|bmWarm|bmS3, set_auto_refresh }, //20\r
2437 { 0x0112, bmCold|bmFast|bmWarm|bmS3, ecc_enable }, //21\r
2438 { 0x0113, bmCold|bmFast , memory_test }, //22\r
2439 { 0x0114, bmCold|bmFast|bmWarm|bmS3, lock_registers } //23 set init done\r
2440 };\r
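// Example boot-path selection from the table above: on a bmWarm boot only the\r
// entries whose boot_path mask includes bmWarm run, i.e. steps 0, 1, 7, 15-21\r
// and 23; steps 2-6, 8-14 and 22 are skipped.\r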
2441\r
2442 uint32_t i;\r
2443\r
2444 ENTERFN();\r
2445\r
2446 DPF(D_INFO, "Meminit build %s %s\n", __DATE__, __TIME__);\r
2447\r
2448 // MRC started\r
2449 post_code(0x01, 0x00);\r
2450\r
2451 if (mrc_params->boot_mode != bmCold)\r
2452 {\r
2453 if (mrc_params->ddr_speed != mrc_params->timings.ddr_speed)\r
2454 {\r
2455 // full training required as frequency changed\r
2456 mrc_params->boot_mode = bmCold;\r
2457 }\r
2458 }\r
2459\r
2460 for (i = 0; i < MCOUNT(init); i++)\r
2461 {\r
2462 uint64_t my_tsc;\r
2463\r
2464#ifdef MRC_SV\r
2465 if (mrc_params->menu_after_mrc && i > 14)\r
2466 {\r
2467 uint8_t ch;\r
2468\r
2469 mylop:\r
2470\r
2471 DPF(D_INFO, "-- c - continue --\n");\r
2472 DPF(D_INFO, "-- j - move to jedec init --\n");\r
2473 DPF(D_INFO, "-- m - memory test --\n");\r
2474 DPF(D_INFO, "-- r - cpu read --\n");\r
2475 DPF(D_INFO, "-- w - cpu write --\n");\r
2476 DPF(D_INFO, "-- b - hte base test --\n");\r
2477 DPF(D_INFO, "-- g - hte extended test --\n");\r
2478\r
2479 ch = mgetc();\r
2480 switch (ch)\r
2481 {\r
2482 case 'c':\r
2483 break;\r
2484 case 'j': //move to jedec init\r
2485 i = 5;\r
2486 break;\r
2487\r
2488 case 'M':\r
2489 case 'N':\r
2490 {\r
2491 uint32_t n, res, cnt=0;\r
2492\r
2493 for(n=0; mgetch()==0; n++)\r
2494 {\r
2495 if( ch == 'M' || n % 256 == 0)\r
2496 {\r
2497 DPF(D_INFO, "n=%d e=%d\n", n, cnt);\r
2498 }\r
2499\r
2500 res = 0;\r
2501\r
2502 if( ch == 'M')\r
2503 {\r
2504 memory_test(mrc_params);\r
2505 res |= mrc_params->status;\r
2506 }\r
2507\r
2508 mrc_params->hte_setup = 1;\r
2509 res |= check_bls_ex(mrc_params, 0x00000000);\r
2510 res |= check_bls_ex(mrc_params, 0x00000000);\r
2511 res |= check_bls_ex(mrc_params, 0x00000000);\r
2512 res |= check_bls_ex(mrc_params, 0x00000000);\r
2513\r
2514 if( mrc_params->rank_enables & 2)\r
2515 {\r
2516 mrc_params->hte_setup = 1;\r
2517 res |= check_bls_ex(mrc_params, 0x40000000);\r
2518 res |= check_bls_ex(mrc_params, 0x40000000);\r
2519 res |= check_bls_ex(mrc_params, 0x40000000);\r
2520 res |= check_bls_ex(mrc_params, 0x40000000);\r
2521 }\r
2522\r
2523 if( res != 0)\r
2524 {\r
2525 DPF(D_INFO, "###########\n");\r
2526 DPF(D_INFO, "#\n");\r
2527 DPF(D_INFO, "# Error count %d\n", ++cnt);\r
2528 DPF(D_INFO, "#\n");\r
2529 DPF(D_INFO, "###########\n");\r
2530 }\r
2531\r
2532 } // for\r
2533\r
2534 select_memory_manager(mrc_params);\r
2535 }\r
2536 goto mylop;\r
2537 case 'm':\r
2538 memory_test(mrc_params);\r
2539 goto mylop;\r
2540 case 'n':\r
2541 cpu_memory_test(mrc_params);\r
2542 goto mylop;\r
2543\r
2544 case 'l':\r
2545 ch = mgetc();\r
2546 if (ch <= '9') DpfPrintMask ^= (ch - '0') << 3;\r
2547 DPF(D_INFO, "Log mask %x\n", DpfPrintMask);\r
2548 goto mylop;\r
2549 case 'p':\r
2550 print_timings(mrc_params);\r
2551 goto mylop;\r
2552 case 'R':\r
2553 rd_train(mrc_params);\r
2554 goto mylop;\r
2555 case 'W':\r
2556 wr_train(mrc_params);\r
2557 goto mylop;\r
2558\r
2559 case 'r':\r
2560 cpu_read();\r
2561 goto mylop;\r
2562 case 'w':\r
2563 cpu_write();\r
2564 goto mylop;\r
2565\r
2566 case 'g':\r
2567 {\r
2568 uint32_t result;\r
2569 select_hte(mrc_params);\r
2570 mrc_params->hte_setup = 1;\r
2571 result = check_bls_ex(mrc_params, 0);\r
2572 DPF(D_INFO, "Extended test result %x\n", result);\r
2573 select_memory_manager(mrc_params);\r
2574 }\r
2575 goto mylop;\r
2576 case 'b':\r
2577 {\r
2578 uint32_t result;\r
2579 select_hte(mrc_params);\r
2580 mrc_params->hte_setup = 1;\r
2581 result = check_rw_coarse(mrc_params, 0);\r
2582 DPF(D_INFO, "Base test result %x\n", result);\r
2583 select_memory_manager(mrc_params);\r
2584 }\r
2585 goto mylop;\r
2586 case 'B':\r
2587 select_hte(mrc_params);\r
2588 HteMemOp(0x2340, 1, 1);\r
2589 select_memory_manager(mrc_params);\r
2590 goto mylop;\r
2591\r
2592 case '3':\r
2593 {\r
2594 RegDPMC0 DPMC0reg;\r
2595\r
2596 DPF( D_INFO, "===>> Start suspend\n");\r
2597 isbR32m(MCU, DSTAT);\r
2598\r
2599 DPMC0reg.raw = isbR32m(MCU, DPMC0);\r
2600 DPMC0reg.field.DYNSREN = 0;\r
2601 DPMC0reg.field.powerModeOpCode = 0x05; // Disable Master DLL\r
2602 isbW32m(MCU, DPMC0, DPMC0reg.raw);\r
2603\r
2604 // Should be off for negative test case verification\r
2605 #if 1\r
2606 Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),\r
2607 (uint32_t)SB_COMMAND(SB_SUSPEND_CMND_OPCODE, MCU, 0));\r
2608 #endif\r
2609\r
2610 DPF( D_INFO, "press key\n");\r
2611 mgetc();\r
2612 DPF( D_INFO, "===>> Start resume\n");\r
2613 isbR32m(MCU, DSTAT);\r
2614\r
2615 mrc_params->boot_mode = bmS3;\r
2616 i = 0;\r
2617 }\r
2618\r
2619 } // switch\r
2620\r
2621 } // if( menu\r
2622#endif //MRC_SV\r
2623\r
2624 if (mrc_params->boot_mode & init[i].boot_path)\r
2625 {\r
2626 uint8_t major = init[i].post_code >> 8 & 0xFF;\r
2627 uint8_t minor = init[i].post_code >> 0 & 0xFF;\r
2628 post_code(major, minor);\r
2629\r
2630 my_tsc = read_tsc();\r
2631 init[i].init_fn(mrc_params);\r
2632 DPF(D_TIME, "Execution time %llX", read_tsc() - my_tsc);\r
2633 }\r
2634 }\r
2635\r
2636 // display the timings\r
2637 print_timings(mrc_params);\r
2638\r
2639 // MRC is complete.\r
2640 post_code(0x01, 0xFF);\r
2641\r
2642 LEAVEFN();\r
2643 return;\r
2644}\r