]> git.proxmox.com Git - mirror_edk2.git/blame - QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei/meminit.c
QuarkSocPkg: Replace BSD License with BSD+Patent License
[mirror_edk2.git] / QuarkSocPkg / QuarkNorthCluster / MemoryInit / Pei / meminit.c
CommitLineData
9b6bbcdb
MK
1/************************************************************************\r
2 *\r
3 * Copyright (c) 2013-2015 Intel Corporation.\r
4 *\r
c9f231d0 5* SPDX-License-Identifier: BSD-2-Clause-Patent\r
9b6bbcdb
MK
6 *\r
7 * This file contains all of the Cat Mountain Memory Reference Code (MRC).\r
8 *\r
9 * These functions are generic and should work for any Cat Mountain config.\r
10 *\r
11 * MRC requires two data structures to be passed in which are initialised by "PreMemInit()".\r
12 *\r
13 * The basic flow is as follows:\r
14 * 01) Check for supported DDR speed configuration\r
15 * 02) Set up MEMORY_MANAGER buffer as pass-through (POR)\r
16 * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive setting possible\r
17 * 04) Set up the MCU logic\r
18 * 05) Set up the DDR_PHY logic\r
19 * 06) Initialise the DRAMs (JEDEC)\r
20 * 07) Perform the Receive Enable Calibration algorithm\r
21 * 08) Perform the Write Leveling algorithm\r
22 * 09) Perform the Read Training algorithm (includes internal Vref)\r
23 * 10) Perform the Write Training algorithm\r
24 * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings\r
25 *\r
26 * Dunit configuration based on Valleyview MRC.\r
27 *\r
28 ***************************************************************************/\r
29\r
30#include "mrc.h"\r
31#include "memory_options.h"\r
32\r
33#include "meminit.h"\r
34#include "meminit_utils.h"\r
35#include "hte.h"\r
36#include "io.h"\r
37\r
// Override ODT to off state if requested.
// NOTE(review): assumes DRMC bit 12 disables read ODT when
// rd_odt_value==0 — confirm against the Dunit register spec.
#define DRMC_DEFAULT (mrc_params->rd_odt_value==0?BIT12:0)
40\r
41\r
// tRFC (refresh cycle time) in picoseconds, indexed by DRAM density
// encoding (0=512Mb .. 4=8Gb). Converted to clocks with MCEIL(tRFC, tCK).
const uint32_t tRFC[5] =
{
    90000,  // 512Mb
    110000, // 1Gb
    160000, // 2Gb
    300000, // 4Gb
    350000, // 8Gb
};
51\r
// tCK clock period in picoseconds per speed index: 0=800, 1=1066, 2=1333 MT/s.
const uint32_t tCK[3] =
{
    2500,
    1875,
    1500
};
59\r
#ifdef SIM
// Select static timings specific to simulation environment
#define PLATFORM_ID 0
#else
// Select static timings specific to ClantonPeek platform
#define PLATFORM_ID 1
#endif
67\r
68\r
// Global variables

// Static PHY delay values indexed by PLATFORM_ID (0=SIM, 1=ClantonPeek).
// NOTE(review): the unit/encoding of these delays is defined by the
// set_wclk/set_wctl/set_wcmd helpers in meminit_utils — confirm there.
const uint16_t ddr_wclk[] =
    {193, 158};

const uint16_t ddr_wctl[] =
    { 1, 217};

const uint16_t ddr_wcmd[] =
    { 1, 220};


#ifdef BACKUP_RCVN
// Backup (non-trained) receive-enable delays per platform.
const uint16_t ddr_rcvn[] =
    {129, 498};
#endif // BACKUP_RCVN

#ifdef BACKUP_WDQS
// Backup (non-trained) write DQS delays per platform.
const uint16_t ddr_wdqs[] =
    { 65, 289};
#endif // BACKUP_WDQS

#ifdef BACKUP_RDQS
// Backup (non-trained) read DQS delays per platform.
const uint8_t ddr_rdqs[] =
    { 32, 24};
#endif // BACKUP_RDQS

#ifdef BACKUP_WDQ
// Backup (non-trained) write DQ delays per platform.
const uint16_t ddr_wdq[] =
    { 32, 257};
#endif // BACKUP_WDQ
99\r
100\r
101\r
102// Select MEMORY_MANAGER as the source for PRI interface\r
103static void select_memory_manager(\r
104 MRCParams_t *mrc_params)\r
105{\r
106 RegDCO Dco;\r
107\r
108 ENTERFN();\r
109\r
110 Dco.raw = isbR32m(MCU, DCO);\r
111 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
112 isbW32m(MCU, DCO, Dco.raw);\r
113\r
114 LEAVEFN();\r
115}\r
116\r
117// Select HTE as the source for PRI interface\r
118void select_hte(\r
119 MRCParams_t *mrc_params)\r
120{\r
121 RegDCO Dco;\r
122\r
123 ENTERFN();\r
124\r
125 Dco.raw = isbR32m(MCU, DCO);\r
126 Dco.field.PMICTL = 1; //1 - PRI owned by HTE\r
127 isbW32m(MCU, DCO, Dco.raw);\r
128\r
129 LEAVEFN();\r
130}\r
131\r
// Send a DRAM command. "data" must already be formatted with a
// DCMD_Xxxx macro or an emrsXCommand structure; this routine only
// posts it to the DCMD register (offset 0) unmodified.
static void dram_init_command(
    uint32_t data)
{
  Wr32(DCMD, 0, data);
}
139\r
140// Send DRAM wake command using special MCU side-band WAKE opcode\r
141static void dram_wake_command(\r
142 void)\r
143{\r
144 ENTERFN();\r
145\r
146 Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),\r
147 (uint32_t) SB_COMMAND(SB_WAKE_CMND_OPCODE, MCU, 0));\r
148\r
149 LEAVEFN();\r
150}\r
151\r
// Stop self refresh driven by MCU.
// Unused parameter kept for signature consistency with the other
// MRC stage routines.
static void clear_self_refresh(
    MRCParams_t *mrc_params)
{
  ENTERFN();

  // Clear the PMSTS Channel Self Refresh bits.
  // NOTE(review): writes 1 to bit 0 — presumably write-one-to-clear;
  // confirm against the Dunit PMSTS register definition.
  isbM32m(MCU, PMSTS, BIT0, BIT0);

  LEAVEFN();
}
163\r
164// Configure MCU before jedec init sequence\r
165static void prog_decode_before_jedec(\r
166 MRCParams_t *mrc_params)\r
167{\r
168 RegDRP Drp;\r
169 RegDRCF Drfc;\r
170 RegDCAL Dcal;\r
171 RegDSCH Dsch;\r
172 RegDPMC0 Dpmc0;\r
173\r
174 ENTERFN();\r
175\r
176 // Disable power saving features\r
177 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
178 Dpmc0.field.CLKGTDIS = 1;\r
179 Dpmc0.field.DISPWRDN = 1;\r
180 Dpmc0.field.DYNSREN = 0;\r
181 Dpmc0.field.PCLSTO = 0;\r
182 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
183\r
184 // Disable out of order transactions\r
185 Dsch.raw = isbR32m(MCU, DSCH);\r
186 Dsch.field.OOODIS = 1;\r
187 Dsch.field.NEWBYPDIS = 1;\r
188 isbW32m(MCU, DSCH, Dsch.raw);\r
189\r
190 // Disable issuing the REF command\r
191 Drfc.raw = isbR32m(MCU, DRFC);\r
192 Drfc.field.tREFI = 0;\r
193 isbW32m(MCU, DRFC, Drfc.raw);\r
194\r
195 // Disable ZQ calibration short\r
196 Dcal.raw = isbR32m(MCU, DCAL);\r
197 Dcal.field.ZQCINT = 0;\r
198 Dcal.field.SRXZQCL = 0;\r
199 isbW32m(MCU, DCAL, Dcal.raw);\r
200\r
201 // Training performed in address mode 0, rank population has limited impact, however\r
202 // simulator complains if enabled non-existing rank.\r
203 Drp.raw = 0;\r
204 if (mrc_params->rank_enables & 1)\r
205 Drp.field.rank0Enabled = 1;\r
206 if (mrc_params->rank_enables & 2)\r
207 Drp.field.rank1Enabled = 1;\r
208 isbW32m(MCU, DRP, Drp.raw);\r
209\r
210 LEAVEFN();\r
211}\r
212\r
// After Cold Reset, BIOS should set COLDWAKE bit to 1 before
// sending the WAKE message to the Dunit.
// For Standby Exit, or any other mode in which the DRAM is in
// SR, this bit must be set to 0.
static void perform_ddr_reset(
    MRCParams_t *mrc_params)
{
  ENTERFN();

  // Set COLDWAKE (DRMC bit 16) before sending the WAKE message.
  isbM32m(MCU, DRMC, BIT16, BIT16);

  // Send wake command to DUNIT (MUST be done before JEDEC).
  dram_wake_command();

  // Restore DRMC to its default value; DRMC_DEFAULT also applies the
  // requested ODT-off override (see macro at top of file).
  isbW32m(MCU, DRMC, DRMC_DEFAULT);

  LEAVEFN();
}
233\r
234// Dunit Initialisation Complete.\r
235// Indicates that initialisation of the Dunit has completed.\r
236// Memory accesses are permitted and maintenance operation\r
237// begins. Until this bit is set to a 1, the memory controller will\r
238// not accept DRAM requests from the MEMORY_MANAGER or HTE.\r
239static void set_ddr_init_complete(\r
240 MRCParams_t *mrc_params)\r
241{\r
242 RegDCO Dco;\r
243\r
244 ENTERFN();\r
245\r
246 Dco.raw = isbR32m(MCU, DCO);\r
247 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
248 Dco.field.IC = 1; //1 - initialisation complete\r
249 isbW32m(MCU, DCO, Dco.raw);\r
250\r
251 LEAVEFN();\r
252}\r
253\r
254static void prog_page_ctrl(\r
255 MRCParams_t *mrc_params)\r
256{\r
257 RegDPMC0 Dpmc0;\r
258\r
259 ENTERFN();\r
260\r
261 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
262\r
263 Dpmc0.field.PCLSTO = 0x4;\r
264 Dpmc0.field.PREAPWDEN = 1;\r
265\r
266 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
267}\r
268\r
269// Configure MCU Power Management Control Register\r
270// and Scheduler Control Register.\r
271static void prog_ddr_control(\r
272 MRCParams_t *mrc_params)\r
273{\r
274 RegDSCH Dsch;\r
275 RegDPMC0 Dpmc0;\r
276\r
277 ENTERFN();\r
278\r
279 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
280 Dsch.raw = isbR32m(MCU, DSCH);\r
281\r
282 Dpmc0.field.DISPWRDN = mrc_params->power_down_disable;\r
283 Dpmc0.field.CLKGTDIS = 0;\r
284 Dpmc0.field.PCLSTO = 4;\r
285 Dpmc0.field.PREAPWDEN = 1;\r
286\r
287 Dsch.field.OOODIS = 0;\r
288 Dsch.field.OOOST3DIS = 0;\r
289 Dsch.field.NEWBYPDIS = 0;\r
290\r
291 isbW32m(MCU, DSCH, Dsch.raw);\r
292 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
293\r
294 // CMDTRIST = 2h - CMD/ADDR are tristated when no valid command\r
295 isbM32m(MCU, DPMC1, 2 << 4, BIT5|BIT4);\r
296\r
297 LEAVEFN();\r
298}\r
299\r
300// After training complete configure MCU Rank Population Register\r
301// specifying: ranks enabled, device width, density, address mode.\r
302static void prog_dra_drb(\r
303 MRCParams_t *mrc_params)\r
304{\r
305 RegDRP Drp;\r
306 RegDCO Dco;\r
307\r
308 ENTERFN();\r
309\r
310 Dco.raw = isbR32m(MCU, DCO);\r
311 Dco.field.IC = 0;\r
312 isbW32m(MCU, DCO, Dco.raw);\r
313\r
314 Drp.raw = 0;\r
315 if (mrc_params->rank_enables & 1)\r
316 Drp.field.rank0Enabled = 1;\r
317 if (mrc_params->rank_enables & 2)\r
318 Drp.field.rank1Enabled = 1;\r
319 if (mrc_params->dram_width == x16)\r
320 {\r
321 Drp.field.dimm0DevWidth = 1;\r
322 Drp.field.dimm1DevWidth = 1;\r
323 }\r
324 // Density encoding in DRAMParams_t 0=512Mb, 1=Gb, 2=2Gb, 3=4Gb\r
325 // has to be mapped RANKDENSx encoding (0=1Gb)\r
326 Drp.field.dimm0DevDensity = mrc_params->params.DENSITY - 1;\r
327 Drp.field.dimm1DevDensity = mrc_params->params.DENSITY - 1;\r
328\r
329 // Address mode can be overwritten if ECC enabled\r
330 Drp.field.addressMap = mrc_params->address_mode;\r
331\r
332 isbW32m(MCU, DRP, Drp.raw);\r
333\r
334 Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER\r
335 Dco.field.IC = 1; //1 - initialisation complete\r
336 isbW32m(MCU, DCO, Dco.raw);\r
337\r
338 LEAVEFN();\r
339}\r
340\r
341// Configure refresh rate and short ZQ calibration interval.\r
342// Activate dynamic self refresh.\r
343static void change_refresh_period(\r
344 MRCParams_t *mrc_params)\r
345{\r
346 RegDRCF Drfc;\r
347 RegDCAL Dcal;\r
348 RegDPMC0 Dpmc0;\r
349\r
350 ENTERFN();\r
351\r
352 Drfc.raw = isbR32m(MCU, DRFC);\r
353 Drfc.field.tREFI = mrc_params->refresh_rate;\r
354 Drfc.field.REFDBTCLR = 1;\r
355 isbW32m(MCU, DRFC, Drfc.raw);\r
356\r
357 Dcal.raw = isbR32m(MCU, DCAL);\r
358 Dcal.field.ZQCINT = 3; // 63ms\r
359 isbW32m(MCU, DCAL, Dcal.raw);\r
360\r
361 Dpmc0.raw = isbR32m(MCU, DPMC0);\r
362 Dpmc0.field.ENPHYCLKGATE = 1;\r
363 Dpmc0.field.DYNSREN = 1;\r
364 isbW32m(MCU, DPMC0, Dpmc0.raw);\r
365\r
366 LEAVEFN();\r
367}\r
368\r
// Send the DRAM wake command.
// Thin traced wrapper around dram_wake_command(); the parameter is
// unused and kept only for signature consistency with the other
// MRC stage routines.
static void perform_wake(
    MRCParams_t *mrc_params)
{
  ENTERFN();

  dram_wake_command();

  LEAVEFN();
}
379\r
// prog_ddr_timing_control (aka mcu_init):
// POST_CODE[major] == 0x02
//
// It will initialise timing registers in the MCU (DTR0..DTR4).
//
// Timing inputs come from mrc_params->params (picoseconds or clocks)
// and are converted to DRAM clocks with MCEIL before being packed into
// the DTRx bit-fields; most fields store "value - minimum" rather than
// the raw clock count (the "VLV indx" convention noted below).
static void prog_ddr_timing_control(
    MRCParams_t *mrc_params)
{
  uint8_t TCL, WL;
  uint8_t TRP, TRCD, TRAS, TWR, TWTR, TRRD, TRTP, TFAW;
  uint32_t TCK;

  RegDTR0 Dtr0;
  RegDTR1 Dtr1;
  RegDTR2 Dtr2;
  RegDTR3 Dtr3;
  RegDTR4 Dtr4;

  ENTERFN();

  // mcu_init starts
  post_code(0x02, 0x00);

  // Start from current register contents so unrelated fields survive
  // the read-modify-write below.
  Dtr0.raw = isbR32m(MCU, DTR0);
  Dtr1.raw = isbR32m(MCU, DTR1);
  Dtr2.raw = isbR32m(MCU, DTR2);
  Dtr3.raw = isbR32m(MCU, DTR3);
  Dtr4.raw = isbR32m(MCU, DTR4);

  TCK = tCK[mrc_params->ddr_speed]; // Clock in picoseconds
  TCL = mrc_params->params.tCL; // CAS latency in clocks
  TRP = TCL; // Per CAT MRC
  TRCD = TCL; // Per CAT MRC
  TRAS = MCEIL(mrc_params->params.tRAS, TCK);
  TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600

  TWTR = MCEIL(mrc_params->params.tWTR, TCK);
  TRRD = MCEIL(mrc_params->params.tRRD, TCK);
  TRTP = 4; // Valid for 800 and 1066, use 5 for 1333
  TFAW = MCEIL(mrc_params->params.tFAW, TCK);

  // Write latency: 5/6/7 clocks for speed index 0/1/2 (800/1066/1333).
  WL = 5 + mrc_params->ddr_speed;

  Dtr0.field.dramFrequency = mrc_params->ddr_speed;

  Dtr0.field.tCL = TCL - 5; //Convert from TCL (DRAM clocks) to VLV indx
  Dtr0.field.tRP = TRP - 5; //5 bit DRAM Clock
  Dtr0.field.tRCD = TRCD - 5; //5 bit DRAM Clock

  Dtr1.field.tWCL = WL - 3; //Convert from WL (DRAM clocks) to VLV indx
  Dtr1.field.tWTP = WL + 4 + TWR - 14; //Change to tWTP
  Dtr1.field.tRTP = MMAX(TRTP, 4) - 3; //4 bit DRAM Clock
  Dtr1.field.tRRD = TRRD - 4; //4 bit DRAM Clock
  Dtr1.field.tCMD = 1; //2N
  Dtr1.field.tRAS = TRAS - 14; //6 bit DRAM Clock

  // tFAW is stored in 2-clock granularity, rounded up.
  Dtr1.field.tFAW = ((TFAW + 1) >> 1) - 5; //4 bit DRAM Clock
  Dtr1.field.tCCD = 0; //Set 4 Clock CAS to CAS delay (multi-burst)
  Dtr2.field.tRRDR = 1;
  Dtr2.field.tWWDR = 2;
  Dtr2.field.tRWDR = 2;
  Dtr3.field.tWRDR = 2;
  Dtr3.field.tWRDD = 2;

  // NOTE(review): both branches program the same value; kept separate,
  // presumably so 1333 (or another speed) can diverge later — confirm.
  if (mrc_params->ddr_speed == DDRFREQ_800)
  {
    // Extended RW delay (+1)
    Dtr3.field.tRWSR = TCL - 5 + 1;
  }
  else if(mrc_params->ddr_speed == DDRFREQ_1066)
  {
    // Extended RW delay (+1)
    Dtr3.field.tRWSR = TCL - 5 + 1;
  }

  Dtr3.field.tWRSR = 4 + WL + TWTR - 11;

  // tXP (power-down exit) depends on speed; compensated by tCMD.
  if (mrc_params->ddr_speed == DDRFREQ_800)
  {
    Dtr3.field.tXP = MMAX(0, 1 - Dtr1.field.tCMD);
  }
  else
  {
    Dtr3.field.tXP = MMAX(0, 2 - Dtr1.field.tCMD);
  }

  // ODT windows: write ODT tracks tCMD; read ODT is shifted by the
  // read/write latency difference (tCL - tWCL).
  Dtr4.field.WRODTSTRT = Dtr1.field.tCMD;
  Dtr4.field.WRODTSTOP = Dtr1.field.tCMD;
  Dtr4.field.RDODTSTRT = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2; //Convert from WL (DRAM clocks) to VLV indx
  Dtr4.field.RDODTSTOP = Dtr1.field.tCMD + Dtr0.field.tCL - Dtr1.field.tWCL + 2;
  Dtr4.field.TRGSTRDIS = 0;
  Dtr4.field.ODTDIS = 0;

  isbW32m(MCU, DTR0, Dtr0.raw);
  isbW32m(MCU, DTR1, Dtr1.raw);
  isbW32m(MCU, DTR2, Dtr2.raw);
  isbW32m(MCU, DTR3, Dtr3.raw);
  isbW32m(MCU, DTR4, Dtr4.raw);

  LEAVEFN();
}
480\r
481// ddrphy_init:\r
482// POST_CODE[major] == 0x03\r
483//\r
484// This function performs some initialisation on the DDRIO unit.\r
485// This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.\r
486static void ddrphy_init(MRCParams_t *mrc_params)\r
487{\r
488 uint32_t tempD; // temporary DWORD\r
489 uint8_t channel_i; // channel counter\r
490 uint8_t rank_i; // rank counter\r
491 uint8_t bl_grp_i; // byte lane group counter (2 BLs per module)\r
492\r
493 uint8_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1; // byte lane divisor\r
494 uint8_t speed = mrc_params->ddr_speed & (BIT1|BIT0); // For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333\r
495 uint8_t tCAS;\r
496 uint8_t tCWL;\r
497\r
498 ENTERFN();\r
499\r
500 tCAS = mrc_params->params.tCL;\r
501 tCWL = 5 + mrc_params->ddr_speed;\r
502\r
503 // ddrphy_init starts\r
504 post_code(0x03, 0x00);\r
505\r
506 // HSD#231531\r
507 // Make sure IOBUFACT is deasserted before initialising the DDR PHY.\r
508 // HSD#234845\r
509 // Make sure WRPTRENABLE is deasserted before initialising the DDR PHY.\r
510 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
511 if (mrc_params->channel_enables & (1<<channel_i)) {\r
512 // Deassert DDRPHY Initialisation Complete\r
513 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT20, BIT20); // SPID_INIT_COMPLETE=0\r
514 // Deassert IOBUFACT\r
515 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT2, BIT2); // IOBUFACTRST_N=0\r
516 // Disable WRPTR\r
517 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), ~BIT0, BIT0); // WRPTRENABLE=0\r
518 } // if channel enabled\r
519 } // channel_i loop\r
520\r
521 // Put PHY in reset\r
522 isbM32m(DDRPHY, MASTERRSTN, 0, BIT0); // PHYRSTN=0\r
523\r
524 // Initialise DQ01,DQ23,CMD,CLK-CTL,COMP modules\r
525 // STEP0:\r
526 post_code(0x03, 0x10);\r
527 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
528 if (mrc_params->channel_enables & (1<<channel_i)) {\r
529\r
530 // DQ01-DQ23\r
531 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
532 isbM32m(DDRPHY, (DQOBSCKEBBCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i) ? (0x00) : (BIT22)), (BIT22)); // Analog MUX select - IO2xCLKSEL\r
533\r
534 // ODT Strength\r
535 switch (mrc_params->rd_odt_value) {\r
536 case 1: tempD = 0x3; break; // 60 ohm\r
537 case 2: tempD = 0x3; break; // 120 ohm\r
538 case 3: tempD = 0x3; break; // 180 ohm\r
539 default: tempD = 0x3; break; // 120 ohm\r
540 }\r
541 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
542 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD<<5), (BIT6|BIT5)); // ODT strength\r
543 // Dynamic ODT/DIFFAMP\r
544 tempD = (((tCAS)<<24)|((tCAS)<<16)|((tCAS)<<8)|((tCAS)<<0));\r
545 switch (speed) {\r
546 case 0: tempD -= 0x01010101; break; // 800\r
547 case 1: tempD -= 0x02020202; break; // 1066\r
548 case 2: tempD -= 0x03030303; break; // 1333\r
549 case 3: tempD -= 0x04040404; break; // 1600\r
550 }\r
551 isbM32m(DDRPHY, (B01LATCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // Launch Time: ODT, DIFFAMP, ODT, DIFFAMP\r
552 switch (speed) {\r
553 // HSD#234715\r
554 case 0: tempD = ((0x06<<16)|(0x07<<8)); break; // 800\r
555 case 1: tempD = ((0x07<<16)|(0x08<<8)); break; // 1066\r
556 case 2: tempD = ((0x09<<16)|(0x0A<<8)); break; // 1333\r
557 case 3: tempD = ((0x0A<<16)|(0x0B<<8)); break; // 1600\r
558 }\r
559 isbM32m(DDRPHY, (B0ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
560 isbM32m(DDRPHY, (B1ONDURCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8))); // On Duration: ODT, DIFFAMP\r
561\r
562 switch (mrc_params->rd_odt_value) {\r
563 case 0: tempD = ((0x3F<<16)|(0x3f<<10)); break; // override DIFFAMP=on, ODT=off\r
564 default: tempD = ((0x3F<<16)|(0x2A<<10)); break; // override DIFFAMP=on, ODT=on\r
565 }\r
566 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
567 isbM32m(DDRPHY, (B1OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), tempD, ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT\r
568\r
569 // DLL Setup\r
570 // 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO)\r
571 isbM32m(DDRPHY, (B0LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
572 isbM32m(DDRPHY, (B1LATCTL0 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (((tCAS+7)<<16)|((tCAS-4)<<8)|((tCWL-2)<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // 1xCLK: tEDP, RCVEN, WDQS\r
573\r
574 // RCVEN Bypass (PO)\r
575 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
576 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x0<<7)|(0x0<<0)), (BIT7|BIT0)); // AFE Bypass, RCVEN DIFFAMP\r
577 // TX\r
578 isbM32m(DDRPHY, (DQCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT16), (BIT16)); // 0 means driving DQ during DQS-preamble\r
579 isbM32m(DDRPHY, (B01PTRCTL1 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT8), (BIT8)); // WR_LVL mode disable\r
580 // RX (PO)\r
581 isbM32m(DDRPHY, (B0VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
582 isbM32m(DDRPHY, (B1VREFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((0x03<<2)|(0x0<<1)|(0x0<<0)), ((BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|BIT1|BIT0)); // Internal Vref Code, Enable#, Ext_or_Int (1=Ext)\r
583 isbM32m(DDRPHY, (B0RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
584 isbM32m(DDRPHY, (B1RXIOBUFCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (0), (BIT4)); // Per-Bit De-Skew Enable\r
585 }\r
586 // CLKEBB\r
587 isbM32m(DDRPHY, (CMDOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT23));\r
588\r
589 // Enable tristate control of cmd/address bus\r
590 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT1|BIT0));\r
591\r
592 // ODT RCOMP\r
593 isbM32m(DDRPHY, (CMDRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<5)|(0x03<<0)), ((BIT9|BIT8|BIT7|BIT6|BIT5)|(BIT4|BIT3|BIT2|BIT1|BIT0)));\r
594\r
595 // CMDPM* registers must be programmed in this order...\r
596 isbM32m(DDRPHY, (CMDPMDLYREG4 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFFFU<<16)|(0xFFFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: SFR (regulator), MPLL\r
597 isbM32m(DDRPHY, (CMDPMDLYREG3 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFU<<28)|(0xFFF<<16)|(0xF<<12)|(0x616<<0)), ((BIT31|BIT30|BIT29|BIT28)|(BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8|BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3, VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT_for_PM_MSG_gt0, MDLL Turn On\r
598 isbM32m(DDRPHY, (CMDPMDLYREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // MPLL Divider Reset Delays\r
599 isbM32m(DDRPHY, (CMDPMDLYREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn Off Delays: VREG, Staggered MDLL, MDLL, PI\r
600 isbM32m(DDRPHY, (CMDPMDLYREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xFFU<<24)|(0xFF<<16)|(0xFF<<8)|(0xFF<<0)), ((BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT23|BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT\r
601 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x6<<8)|BIT6|(0x4<<0)), (BIT31|BIT30|BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21|(BIT11|BIT10|BIT9|BIT8)|BIT6|(BIT3|BIT2|BIT1|BIT0))); // Allow PUnit signals\r
602 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
603 // CLK-CTL\r
604 isbM32m(DDRPHY, (CCOBSCKEBBCTL + (channel_i * DDRIOCCC_CH_OFFSET)), 0, (BIT24)); // CLKEBB\r
605 isbM32m(DDRPHY, (CCCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x0<<16)|(0x0<<12)|(0x0<<8)|(0xF<<4)|BIT0), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|BIT0)); // Buffer Enable: CS,CKE,ODT,CLK\r
606 isbM32m(DDRPHY, (CCRCOMPODT + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT RCOMP\r
607 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x3<<4)|(0x7<<0)), ((BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DLL_VREG Bias Trim, VREF Tuning for DLL_VREG\r
608\r
609 // COMP (RON channel specific)\r
610 // - DQ/DQS/DM RON: 32 Ohm\r
611 // - CTRL/CMD RON: 27 Ohm\r
612 // - CLK RON: 26 Ohm\r
613 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
614 isbM32m(DDRPHY, (CMDVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
615 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0F<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
616 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x08<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
617 isbM32m(DDRPHY, (CTLVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0C<<24)|(0x03<<16)), ((BIT29|BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP Vref PU/PD\r
618\r
619 // DQS Swapped Input Enable\r
620 isbM32m(DDRPHY, (COMPEN1CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT19|BIT17), ((BIT31|BIT30)|BIT19|BIT17|(BIT15|BIT14)));\r
621\r
622 // ODT VREF = 1.5 x 274/360+274 = 0.65V (code of ~50)\r
623 isbM32m(DDRPHY, (DQVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
624 isbM32m(DDRPHY, (DQSVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x32<<8)|(0x03<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
625 isbM32m(DDRPHY, (CLKVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x0E<<8)|(0x05<<0)), ((BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // ODT Vref PU/PD\r
626\r
627 // Slew rate settings are frequency specific, numbers below are for 800Mhz (speed == 0)\r
628 // - DQ/DQS/DM/CLK SR: 4V/ns,\r
629 // - CTRL/CMD SR: 1.5V/ns\r
630 tempD = (0x0E<<16)|(0x0E<<12)|(0x08<<8)|(0x0B<<4)|(0x0B<<0);\r
631 isbM32m(DDRPHY, (DLYSELCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (tempD), ((BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ\r
632 isbM32m(DDRPHY, (TCOVREFCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x05<<16)|(0x05<<8)|(0x05<<0)), ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT5|BIT4|BIT3|BIT2|BIT1|BIT0))); // TCO Vref CLK,DQS,DQ\r
633 isbM32m(DDRPHY, (CCBUFODTCH0 + (channel_i * DDRCOMP_CH_OFFSET)), ((0x03<<8)|(0x03<<0)), ((BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT4|BIT3|BIT2|BIT1|BIT0))); // ODTCOMP CMD/CTL PU/PD\r
634 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), (0), ((BIT31|BIT30)|BIT8)); // COMP\r
635\r
636 #ifdef BACKUP_COMPS\r
637 // DQ COMP Overrides\r
638 isbM32m(DDRPHY, (DQDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
639 isbM32m(DDRPHY, (DQDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
640 isbM32m(DDRPHY, (DQDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
641 isbM32m(DDRPHY, (DQDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
642 isbM32m(DDRPHY, (DQODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
643 isbM32m(DDRPHY, (DQODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
644 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
645 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
646 // DQS COMP Overrides\r
647 isbM32m(DDRPHY, (DQSDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
648 isbM32m(DDRPHY, (DQSDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
649 isbM32m(DDRPHY, (DQSDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
650 isbM32m(DDRPHY, (DQSDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x10<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
651 isbM32m(DDRPHY, (DQSODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
652 isbM32m(DDRPHY, (DQSODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
653 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
654 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
655 // CLK COMP Overrides\r
656 isbM32m(DDRPHY, (CLKDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
657 isbM32m(DDRPHY, (CLKDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0C<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
658 isbM32m(DDRPHY, (CLKDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
659 isbM32m(DDRPHY, (CLKDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x07<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
660 isbM32m(DDRPHY, (CLKODTPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PU\r
661 isbM32m(DDRPHY, (CLKODTPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0B<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODTCOMP PD\r
662 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PU\r
663 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31), (BIT31)); // TCOCOMP PD\r
664 // CMD COMP Overrides\r
665 isbM32m(DDRPHY, (CMDDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
666 isbM32m(DDRPHY, (CMDDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
667 isbM32m(DDRPHY, (CMDDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
668 isbM32m(DDRPHY, (CMDDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
669 // CTL COMP Overrides\r
670 isbM32m(DDRPHY, (CTLDRVPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PU\r
671 isbM32m(DDRPHY, (CTLDRVPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0D<<16)), (BIT31|(BIT21|BIT20|BIT19|BIT18|BIT17|BIT16))); // RCOMP PD\r
672 isbM32m(DDRPHY, (CTLDLYPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PU\r
673 isbM32m(DDRPHY, (CTLDLYPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x0A<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // DCOMP PD\r
674 #else\r
675 // DQ TCOCOMP Overrides\r
676 isbM32m(DDRPHY, (DQTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
677 isbM32m(DDRPHY, (DQTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
678 // DQS TCOCOMP Overrides\r
679 isbM32m(DDRPHY, (DQSTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
680 isbM32m(DDRPHY, (DQSTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
681 // CLK TCOCOMP Overrides\r
682 isbM32m(DDRPHY, (CLKTCOPUCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PU\r
683 isbM32m(DDRPHY, (CLKTCOPDCTLCH0 + (channel_i * DDRCOMP_CH_OFFSET)), (BIT31|(0x1F<<16)), (BIT31|(BIT20|BIT19|BIT18|BIT17|BIT16))); // TCOCOMP PD\r
684 #endif // BACKUP_COMPS\r
685 // program STATIC delays\r
686 #ifdef BACKUP_WCMD\r
687 set_wcmd(channel_i, ddr_wcmd[PLATFORM_ID]);\r
688 #else\r
689 set_wcmd(channel_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
690 #endif // BACKUP_WCMD\r
691 for (rank_i=0; rank_i<NUM_RANKS; rank_i++) {\r
692 if (mrc_params->rank_enables & (1<<rank_i)) {\r
693 set_wclk(channel_i, rank_i, ddr_wclk[PLATFORM_ID]);\r
694 #ifdef BACKUP_WCTL\r
695 set_wctl(channel_i, rank_i, ddr_wctl[PLATFORM_ID]);\r
696 #else\r
697 set_wctl(channel_i, rank_i, ddr_wclk[PLATFORM_ID] + HALF_CLK);\r
698 #endif // BACKUP_WCTL\r
699 }\r
700 }\r
701 }\r
702 }\r
703 // COMP (non channel specific)\r
704 //isbM32m(DDRPHY, (), (), ());\r
705 isbM32m(DDRPHY, (DQANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
706 isbM32m(DDRPHY, (DQANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
707 isbM32m(DDRPHY, (CMDANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
708 isbM32m(DDRPHY, (CMDANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
709 isbM32m(DDRPHY, (CLKANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
710 isbM32m(DDRPHY, (CLKANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
711 isbM32m(DDRPHY, (DQSANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
712 isbM32m(DDRPHY, (DQSANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
713 isbM32m(DDRPHY, (CTLANADRVPUCTL), (BIT30), (BIT30)); // RCOMP: Dither PU Enable\r
714 isbM32m(DDRPHY, (CTLANADRVPDCTL), (BIT30), (BIT30)); // RCOMP: Dither PD Enable\r
715 isbM32m(DDRPHY, (DQANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
716 isbM32m(DDRPHY, (DQANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
717 isbM32m(DDRPHY, (CLKANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
718 isbM32m(DDRPHY, (CLKANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
719 isbM32m(DDRPHY, (DQSANAODTPUCTL), (BIT30), (BIT30)); // ODT: Dither PU Enable\r
720 isbM32m(DDRPHY, (DQSANAODTPDCTL), (BIT30), (BIT30)); // ODT: Dither PD Enable\r
721 isbM32m(DDRPHY, (DQANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
722 isbM32m(DDRPHY, (DQANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
723 isbM32m(DDRPHY, (CMDANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
724 isbM32m(DDRPHY, (CMDANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
725 isbM32m(DDRPHY, (CLKANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
726 isbM32m(DDRPHY, (CLKANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
727 isbM32m(DDRPHY, (DQSANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
728 isbM32m(DDRPHY, (DQSANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
729 isbM32m(DDRPHY, (CTLANADLYPUCTL), (BIT30), (BIT30)); // DCOMP: Dither PU Enable\r
730 isbM32m(DDRPHY, (CTLANADLYPDCTL), (BIT30), (BIT30)); // DCOMP: Dither PD Enable\r
731 isbM32m(DDRPHY, (DQANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
732 isbM32m(DDRPHY, (DQANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
733 isbM32m(DDRPHY, (CLKANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
734 isbM32m(DDRPHY, (CLKANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
735 isbM32m(DDRPHY, (DQSANATCOPUCTL), (BIT30), (BIT30)); // TCO: Dither PU Enable\r
736 isbM32m(DDRPHY, (DQSANATCOPDCTL), (BIT30), (BIT30)); // TCO: Dither PD Enable\r
737 isbM32m(DDRPHY, (TCOCNTCTRL), (0x1<<0), (BIT1|BIT0)); // TCOCOMP: Pulse Count\r
738 isbM32m(DDRPHY, (CHNLBUFSTATIC), ((0x03<<24)|(0x03<<16)), ((BIT28|BIT27|BIT26|BIT25|BIT24)|(BIT20|BIT19|BIT18|BIT17|BIT16))); // ODT: CMD/CTL PD/PU\r
739 isbM32m(DDRPHY, (MSCNTR), (0x64<<0), (BIT7|BIT6|BIT5|BIT4|BIT3|BIT2|BIT1|BIT0)); // Set 1us counter\r
740 isbM32m(DDRPHY, (LATCH1CTL), (0x1<<28), (BIT30|BIT29|BIT28)); // ???\r
741\r
742 // Release PHY from reset\r
743 isbM32m(DDRPHY, MASTERRSTN, BIT0, BIT0); // PHYRSTN=1\r
744\r
745 // STEP1:\r
746 post_code(0x03, 0x11);\r
747 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
748 if (mrc_params->channel_enables & (1<<channel_i)) {\r
749 // DQ01-DQ23\r
750 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
751 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
752 delay_n(3);\r
753 }\r
754 // ECC\r
755 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT13), (BIT13)); // Enable VREG\r
756 delay_n(3);\r
757 // CMD\r
758 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
759 delay_n(3);\r
760 // CLK-CTL\r
761 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT13), (BIT13)); // Enable VREG\r
762 delay_n(3);\r
763 }\r
764 }\r
765\r
766 // STEP2:\r
767 post_code(0x03, 0x12);\r
768 delay_n(200);\r
769 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
770 if (mrc_params->channel_enables & (1<<channel_i)) {\r
771 // DQ01-DQ23\r
772 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
773 isbM32m(DDRPHY, (DQMDLLCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT17), (BIT17)); // Enable MCDLL\r
774 delay_n(50);\r
775 }\r
776 // ECC\r
777 isbM32m(DDRPHY, (ECCMDLLCTL), (BIT17), (BIT17)); // Enable MCDLL\r
778 delay_n(50);\r
779 // CMD\r
780 isbM32m(DDRPHY, (CMDMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
781 delay_n(50);\r
782 // CLK-CTL\r
783 isbM32m(DDRPHY, (CCMDLLCTL + (channel_i * DDRIOCCC_CH_OFFSET)), (BIT18), (BIT18)); // Enable MCDLL\r
784 delay_n(50);\r
785 }\r
786 }\r
787\r
788 // STEP3:\r
789 post_code(0x03, 0x13);\r
790 delay_n(100);\r
791 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
792 if (mrc_params->channel_enables & (1<<channel_i)) {\r
793 // DQ01-DQ23\r
794 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
795#ifdef FORCE_16BIT_DDRIO\r
796 tempD = ((bl_grp_i) && (mrc_params->channel_width == x16)) ? ((0x1<<12)|(0x1<<8)|(0xF<<4)|(0xF<<0)) : ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
797#else\r
798 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
799#endif\r
800 isbM32m(DDRPHY, (DQDLLTXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
801 delay_n(3);\r
802 isbM32m(DDRPHY, (DQDLLRXCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL\r
803 delay_n(3);\r
804 isbM32m(DDRPHY, (B0OVRCTL + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), (BIT3|BIT2|BIT1|BIT0), (BIT3|BIT2|BIT1|BIT0)); // Enable RXDLL Overrides BL0\r
805 }\r
806\r
807 // ECC\r
808 tempD = ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0));\r
809 isbM32m(DDRPHY, (ECCDLLTXCTL), (tempD), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
810 delay_n(3);\r
811\r
812 // CMD (PO)\r
813 isbM32m(DDRPHY, (CMDDLLTXCTL + (channel_i * DDRIOCCC_CH_OFFSET)), ((0xF<<12)|(0xF<<8)|(0xF<<4)|(0xF<<0)), ((BIT15|BIT14|BIT13|BIT12)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4)|(BIT3|BIT2|BIT1|BIT0))); // Enable TXDLL\r
814 delay_n(3);\r
815 }\r
816 }\r
817\r
818\r
819 // STEP4:\r
820 post_code(0x03, 0x14);\r
821 for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++) {\r
822 if (mrc_params->channel_enables & (1<<channel_i)) {\r
823 // Host To Memory Clock Alignment (HMC) for 800/1066\r
824 for (bl_grp_i=0; bl_grp_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_grp_i++) {\r
825 isbM32m(DDRPHY, (DQCLKALIGNREG2 + (bl_grp_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), ((bl_grp_i)?(0x3):(0x1)), (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
826 }\r
827 isbM32m(DDRPHY, (ECCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
828 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x0, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
829 isbM32m(DDRPHY, (CCCLKALIGNREG2 + (channel_i * DDRIODQ_CH_OFFSET)), 0x2, (BIT3|BIT2|BIT1|BIT0)); // CLK_ALIGN_MOD_ID\r
830 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), (0x2<<4), (BIT5|BIT4)); // CLK_ALIGN_MODE\r
831 isbM32m(DDRPHY, (CMDCLKALIGNREG1 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x18<<16)|(0x10<<8)|(0x8<<2)|(0x1<<0)), ((BIT22|BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT14|BIT13|BIT12|BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4|BIT3|BIT2)|(BIT1|BIT0))); // NUM_SAMPLES, MAX_SAMPLES, MACRO_PI_STEP, MICRO_PI_STEP\r
832 isbM32m(DDRPHY, (CMDCLKALIGNREG2 + (channel_i * DDRIOCCC_CH_OFFSET)), ((0x10<<16)|(0x4<<8)|(0x2<<4)), ((BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT11|BIT10|BIT9|BIT8)|(BIT7|BIT6|BIT5|BIT4))); // ???, TOTAL_NUM_MODULES, FIRST_U_PARTITION\r
833 #ifdef HMC_TEST\r
834 isbM32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT24, BIT24); // START_CLK_ALIGN=1\r
835 while (isbR32m(DDRPHY, (CMDCLKALIGNREG0 + (channel_i * DDRIOCCC_CH_OFFSET))) & BIT24); // wait for START_CLK_ALIGN=0\r
836 #endif // HMC_TEST\r
837\r
838 // Set RD/WR Pointer Seperation & COUNTEN & FIFOPTREN\r
839 isbM32m(DDRPHY, (CMDPTRREG + (channel_i * DDRIOCCC_CH_OFFSET)), BIT0, BIT0); // WRPTRENABLE=1\r
840\r
841\r
842#ifdef SIM\r
843 // comp is not working on simulator\r
844#else\r
845 // COMP initial\r
846 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), BIT5, BIT5); // enable bypass for CLK buffer (PO)\r
847 isbM32m(DDRPHY, (CMPCTRL), (BIT0), (BIT0)); // Initial COMP Enable\r
848 while (isbR32m(DDRPHY, (CMPCTRL)) & BIT0); // wait for Initial COMP Enable = 0\r
849 isbM32m(DDRPHY, (COMPEN0CH0 + (channel_i * DDRCOMP_CH_OFFSET)), ~BIT5, BIT5); // disable bypass for CLK buffer (PO)\r
850#endif\r
851\r
852 // IOBUFACT\r
853 // STEP4a\r
854 isbM32m(DDRPHY, (CMDCFGREG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT2, BIT2); // IOBUFACTRST_N=1\r
855\r
856 // DDRPHY initialisation complete\r
857 isbM32m(DDRPHY, (CMDPMCONFIG0 + (channel_i * DDRIOCCC_CH_OFFSET)), BIT20, BIT20); // SPID_INIT_COMPLETE=1\r
858 }\r
859 }\r
860\r
861 LEAVEFN();\r
862 return;\r
863}\r
864\r
// jedec_init (aka PerformJedecInit):
// This function performs JEDEC initialisation on all enabled channels.
//
// Sequence performed here:
//   1. Assert DRAM RESET# for 200us, then release it.
//   2. Set CKEVAL for populated ranks and send a NOP to each one.
//   3. Build MR2/MR3/MR1/MR0 command images from mrc_params timings.
//   4. Per populated rank: program MR2, MR3, MR1, MR0, then issue ZQCL.
//
// 'silent' suppresses only the initial post_code (used when this routine
// is re-invoked during training).
static void jedec_init(
    MRCParams_t *mrc_params,
    uint32_t silent)
{
  uint8_t TWR, WL, Rank;   // write recovery (clocks), write latency (clocks), rank index
  uint32_t TCK;            // clock period in picoseconds for the selected speed

  RegDTR0 DTR0reg;

  DramInitDDR3MRS0 mrs0Command;
  DramInitDDR3EMR1 emrs1Command;
  DramInitDDR3EMR2 emrs2Command;
  DramInitDDR3EMR3 emrs3Command;

  ENTERFN();

  // jedec_init starts
  if (!silent)
  {
    post_code(0x04, 0x00);
  }

  // Assert RESET# for 200us
  isbM32m(DDRPHY, CCDDR3RESETCTL, BIT1, (BIT8|BIT1)); // DDR3_RESET_SET=0, DDR3_RESET_RESET=1
#ifdef QUICKSIM
  // Don't waste time during simulation
  delay_u(2);
#else
  delay_u(200);
#endif
  isbM32m(DDRPHY, CCDDR3RESETCTL, BIT8, (BIT8|BIT1)); // DDR3_RESET_SET=1, DDR3_RESET_RESET=0

  // snapshot DTR0 — tCL field is needed below for the MR0 CAS latency
  DTR0reg.raw = isbR32m(MCU, DTR0);

  // Set CKEVAL for populated ranks
  // then send NOP to each rank (#4550197)
  {
    uint32_t DRPbuffer;
    uint32_t DRMCbuffer;

    DRPbuffer = isbR32m(MCU, DRP);
    DRPbuffer &= 0x3;                   // keep only the rank population bits
    DRMCbuffer = isbR32m(MCU, DRMC);
    DRMCbuffer &= 0xFFFFFFFC;           // clear low bits before merging rank bits
    DRMCbuffer |= (BIT4 | DRPbuffer);   // BIT4 = CKEVAL

    isbW32m(MCU, DRMC, DRMCbuffer);

    for (Rank = 0; Rank < NUM_RANKS; Rank++)
    {
      // Skip to next populated rank
      if ((mrc_params->rank_enables & (1 << Rank)) == 0)
      {
        continue;
      }

      dram_init_command(DCMD_NOP(Rank));
    }

    // restore DRMC; DRMC_DEFAULT also applies the rd_odt_value override (see macro)
    isbW32m(MCU, DRMC, DRMC_DEFAULT);
  }

  // setup for emrs 2
  // BIT[15:11] --> Always "0"
  // BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
  // BIT[08] --> Always "0"
  // BIT[07] --> SRT: use sr_temp_range
  // BIT[06] --> ASR: want "Manual SR Reference" (0)
  // BIT[05:03] --> CWL: use oem_tCWL
  // BIT[02:00] --> PASR: want "Full Array" (0)
  emrs2Command.raw = 0;
  emrs2Command.field.bankAddress = 2;

  WL = 5 + mrc_params->ddr_speed;    // write latency scales with the speed grade
  emrs2Command.field.CWL = WL - 5;   // MR2 encodes CWL as (WL - 5)
  emrs2Command.field.SRT = mrc_params->sr_temp_range;

  // setup for emrs 3
  // BIT[15:03] --> Always "0"
  // BIT[02] --> MPR: want "Normal Operation" (0)
  // BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
  emrs3Command.raw = 0;
  emrs3Command.field.bankAddress = 3;

  // setup for emrs 1
  // BIT[15:13] --> Always "0"
  // BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
  // BIT[11:11] --> TDQS: want "Disabled" (0)
  // BIT[10:10] --> Always "0"
  // BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
  // BIT[08] --> Always "0"
  // BIT[07] --> WR_LVL: want "Disabled" (0)
  // BIT[05,01] --> DIC: use ron_value
  // BIT[04:03] --> AL: additive latency want "0" (0)
  // BIT[00] --> DLL: want "Enable" (0)
  //
  // (BIT5|BIT1) set Ron value
  // 00 --> RZQ/6 (40ohm)
  // 01 --> RZQ/7 (34ohm)
  // 1* --> RESERVED
  //
  // (BIT9|BIT6|BIT2) set Rtt_nom value
  // 000 --> Disabled
  // 001 --> RZQ/4 ( 60ohm)
  // 010 --> RZQ/2 (120ohm)
  // 011 --> RZQ/6 ( 40ohm)
  // 1** --> RESERVED
  emrs1Command.raw = 0;
  emrs1Command.field.bankAddress = 1;
  emrs1Command.field.dllEnabled = 0; // 0 = Enable , 1 = Disable

  // driver impedance (DIC) selection per the Ron table above
  if (mrc_params->ron_value == 0)
  {
    emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_34;
  }
  else
  {
    emrs1Command.field.DIC0 = DDR3_EMRS1_DIC_40;
  }


  // Rtt_nom selection per the table above; values >= 3 leave Rtt_nom disabled
  if (mrc_params->rtt_nom_value == 0)
  {
    emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_40 << 6);
  }
  else if (mrc_params->rtt_nom_value == 1)
  {
    emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_60 << 6);
  }
  else if (mrc_params->rtt_nom_value == 2)
  {
    emrs1Command.raw |= (DDR3_EMRS1_RTTNOM_120 << 6);
  }

  // save MRS1 value (excluding control fields)
  // (consumed later, e.g. by write levelling to restore MR1 — shifted past the
  // bank-address/rank-select control bits)
  mrc_params->mrs1 = emrs1Command.raw >> 6;

  // setup for mrs 0
  // BIT[15:13] --> Always "0"
  // BIT[12] --> PPD: for Quark (1)
  // BIT[11:09] --> WR: use oem_tWR
  // BIT[08] --> DLL: want "Reset" (1, self clearing)
  // BIT[07] --> MODE: want "Normal" (0)
  // BIT[06:04,02] --> CL: use oem_tCAS
  // BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
  // BIT[01:00] --> BL: want "8 Fixed" (0)
  // WR:
  // 0 --> 16
  // 1 --> 5
  // 2 --> 6
  // 3 --> 7
  // 4 --> 8
  // 5 --> 10
  // 6 --> 12
  // 7 --> 14
  // CL:
  // BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
  // BIT[06:04] use oem_tCAS-4
  mrs0Command.raw = 0;
  mrs0Command.field.bankAddress = 0;
  mrs0Command.field.dllReset = 1;
  mrs0Command.field.BL = 0;
  mrs0Command.field.PPD = 1;
  mrs0Command.field.casLatency = DTR0reg.field.tCL + 1;

  TCK = tCK[mrc_params->ddr_speed];
  TWR = MCEIL(15000, TCK); // Per JEDEC: tWR=15000ps DDR2/3 from 800-1600
  mrs0Command.field.writeRecovery = TWR - 4;   // encode per WR table above (5..8 clocks -> 1..4)

  // program mode registers and run long ZQ calibration on each populated rank
  for (Rank = 0; Rank < NUM_RANKS; Rank++)
  {
    // Skip to next populated rank
    if ((mrc_params->rank_enables & (1 << Rank)) == 0)
    {
      continue;
    }

    // order used here: MR2, MR3, MR1, MR0, then ZQCL
    emrs2Command.field.rankSelect = Rank;
    dram_init_command(emrs2Command.raw);

    emrs3Command.field.rankSelect = Rank;
    dram_init_command(emrs3Command.raw);

    emrs1Command.field.rankSelect = Rank;
    dram_init_command(emrs1Command.raw);

    mrs0Command.field.rankSelect = Rank;
    dram_init_command(mrs0Command.raw);

    dram_init_command(DCMD_ZQCL(Rank));
  }

  LEAVEFN();
  return;
}
1062\r
// rcvn_cal:
// POST_CODE[major] == 0x05
//
// This function will perform our RCVEN Calibration Algorithm.
// We will only use the 2xCLK domain timings to perform RCVEN Calibration.
// All byte lanes will be calibrated "simultaneously" per channel per rank.
//
// Per channel/rank the search is:
//   1. Start every byte lane at a fixed delay and find the rising edge of
//      the DQS preamble (find_rising_edge).
//   2. Step +1/4 CLK to sit in the centre of the high pulse.
//   3. Back off in whole-CLK steps until each lane samples "0".
//   4. Step +1/4 CLK again to land in the centre of the preamble.
// With R2R_SHARING the final per-lane value is a rolling average across
// the ranks trained so far (shared setting for rank-to-rank configs).
static void rcvn_cal(
    MRCParams_t *mrc_params)
{
  uint8_t channel_i; // channel counter
  uint8_t rank_i; // rank counter
  uint8_t bl_i; // byte lane counter
  uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor

#ifdef R2R_SHARING
  uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
#ifndef BACKUP_RCVN
  uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
#endif // BACKUP_RCVN
#endif // R2R_SHARING

#ifdef BACKUP_RCVN
#else
  uint32_t tempD; // temporary DWORD
  uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane
  RegDTR1 dtr1;     // working copy of DTR1 (tCCD tightened during training)
  RegDTR1 dtr1save; // original DTR1, restored at the end
#endif // BACKUP_RCVN
  ENTERFN();

  // rcvn_cal starts
  post_code(0x05, 0x00);

#ifndef BACKUP_RCVN
  // need separate burst to sample DQS preamble
  dtr1.raw = dtr1save.raw = isbR32m(MCU, DTR1);
  dtr1.field.tCCD = 1;
  isbW32m(MCU, DTR1, dtr1.raw);
#endif

#ifdef R2R_SHARING
  // need to set "final_delay[][]" elements to "0"
  memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
#endif // R2R_SHARING

  // loop through each enabled channel
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      // perform RCVEN Calibration on a per rank basis
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          // POST_CODE here indicates the current channel and rank being calibrated
          post_code(0x05, (0x10 + ((channel_i << 4) | rank_i)));

#ifdef BACKUP_RCVN
          // set hard-coded timing values
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            set_rcvn(channel_i, rank_i, bl_i, ddr_rcvn[PLATFORM_ID]);
          }
#else
          // enable FIFORST
          // (B01PTRCTL1 is shared per byte-lane pair, hence the stride of 2)
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)
          {
            isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), 0,
                BIT8); // 0 is enabled
          } // bl_i loop
          // initialise the starting delay to 128 PI (tCAS +1 CLK)
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
#ifdef SIM
            // Original value was late at the end of DQS sequence
            delay[bl_i] = 3 * FULL_CLK;
#else
            delay[bl_i] = (4 + 1) * FULL_CLK; // 1x CLK domain timing is tCAS-4
#endif

            set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
          } // bl_i loop

          // now find the rising edge
          find_rising_edge(mrc_params, delay, channel_i, rank_i, true);
          // Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse.
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            delay[bl_i] += QRTR_CLK;
            set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
          } // bl_i loop
          // Now decrement delay by 128 PI (1 CLK) until we sample a "0"
          do
          {

            // sample_dqs returns a bitmask: bit set = lane still samples "1"
            tempD = sample_dqs(mrc_params, channel_i, rank_i, true);
            for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
            {
              if (tempD & (1 << bl_i))
              {
                if (delay[bl_i] >= FULL_CLK)
                {
                  delay[bl_i] -= FULL_CLK;
                  set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
                }
                else
                {
                  // not enough delay
                  training_message(channel_i, rank_i, bl_i);
                  post_code(0xEE, 0x50);
                }
              }
            } // bl_i loop
          } while (tempD & 0xFF); // repeat until every lane samples "0"

#ifdef R2R_SHARING
          // increment "num_ranks_enabled"
          num_ranks_enabled++;
          // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            delay[bl_i] += QRTR_CLK;
            // add "delay[]" values to "final_delay[][]" for rolling average
            final_delay[channel_i][bl_i] += delay[bl_i];
            // set timing based on rolling average values
            set_rcvn(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
          } // bl_i loop
#else
          // Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble.
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            delay[bl_i] += QRTR_CLK;
            set_rcvn(channel_i, rank_i, bl_i, delay[bl_i]);
          } // bl_i loop

#endif // R2R_SHARING

          // disable FIFORST
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i += 2)
          {
            isbM32m(DDRPHY, (B01PTRCTL1 + ((bl_i >> 1) * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)), BIT8,
                BIT8); // 1 is disabled
          } // bl_i loop

#endif // BACKUP_RCVN

        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

#ifndef BACKUP_RCVN
  // restore original
  isbW32m(MCU, DTR1, dtr1save.raw);
#endif

#ifdef MRC_SV
  // Interactive tuning hook (simulation/validation builds only)
  if (mrc_params->tune_rcvn)
  {
    uint32_t rcven, val;
    uint32_t rdcmd2rcven;

    /*
     Formulas for RDCMD2DATAVALID & DIFFAMP dynamic timings

     1. Set after RCVEN training

     //Tune RDCMD2DATAVALID

     x80/x84[21:16]
     MAX OF 2 RANKS : round up (rdcmd2rcven (rcven 1x) + 2x x 2 + PI/128) + 5

     //rdcmd2rcven x80/84[12:8]
     //rcven 2x x70[23:20] & [11:8]

     //Tune DIFFAMP Timings

     //diffampen launch x88[20:16] & [4:0] -- B01LATCTL1
     MIN OF 2 RANKS : round down (rcven 1x + 2x x 2 + PI/128) - 1

     //diffampen length x8C/x90 [13:8] -- B0ONDURCTL B1ONDURCTL
     MAX OF 2 RANKS : roundup (rcven 1x + 2x x 2 + PI/128) + 5


     2. need to do a fiforst after settings these values
    */

    DPF(D_INFO, "BEFORE\n");
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));

    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));

    // byte lane 0: derive dynamic timings from the trained RCVEN (in clocks)
    rcven = get_rcvn(0, 0, 0) / 128;
    rdcmd2rcven = (isbR32m(DDRPHY, B0LATCTL0) >> 8) & 0x1F;
    val = rdcmd2rcven + rcven + 6;
    isbM32m(DDRPHY, B0LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));

    val = rdcmd2rcven + rcven - 1;
    isbM32m(DDRPHY, B01LATCTL1, val << 0, (BIT4|BIT3|BIT2|BIT1|BIT0));

    val = rdcmd2rcven + rcven + 5;
    isbM32m(DDRPHY, B0ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));

    // byte lane 1: same derivation against the B1 register set
    rcven = get_rcvn(0, 0, 1) / 128;
    rdcmd2rcven = (isbR32m(DDRPHY, B1LATCTL0) >> 8) & 0x1F;
    val = rdcmd2rcven + rcven + 6;
    isbM32m(DDRPHY, B1LATCTL0, val << 16, (BIT21|BIT20|BIT19|BIT18|BIT17|BIT16));

    val = rdcmd2rcven + rcven - 1;
    isbM32m(DDRPHY, B01LATCTL1, val << 16, (BIT20|BIT19|BIT18|BIT17|BIT16));

    val = rdcmd2rcven + rcven + 5;
    isbM32m(DDRPHY, B1ONDURCTL, val << 8, (BIT13|BIT12|BIT11|BIT10|BIT9|BIT8));

    DPF(D_INFO, "AFTER\n");
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0LATCTL0));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B01LATCTL1));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B0ONDURCTL));

    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1LATCTL0));
    DPF(D_INFO, "### %x\n", isbR32m(DDRPHY, B1ONDURCTL));

    DPF(D_INFO, "\nPress a key\n");
    mgetc();

    // fifo reset
    isbM32m(DDRPHY, B01PTRCTL1, 0, BIT8); // 0 is enabled
    delay_n(3);
    isbM32m(DDRPHY, B01PTRCTL1, BIT8, BIT8); // 1 is disabled
  }
#endif

  LEAVEFN();
  return;
}
1302\r
1303// Check memory executing write/read/verify of many data patterns\r
1304// at the specified address. Bits in the result indicate failure\r
1305// on specific byte lane.\r
1306static uint32_t check_bls_ex(\r
1307 MRCParams_t *mrc_params,\r
1308 uint32_t address)\r
1309{\r
1310 uint32_t result;\r
1311 uint8_t first_run = 0;\r
1312\r
1313 if (mrc_params->hte_setup)\r
1314 {\r
1315 mrc_params->hte_setup = 0;\r
1316\r
1317 first_run = 1;\r
1318 select_hte(mrc_params);\r
1319 }\r
1320\r
1321 result = WriteStressBitLanesHTE(mrc_params, address, first_run);\r
1322\r
1323 DPF(D_TRN, "check_bls_ex result is %x\n", result);\r
1324 return result;\r
1325}\r
1326\r
1327// Check memory executing simple write/read/verify at\r
1328// the specified address. Bits in the result indicate failure\r
1329// on specific byte lane.\r
1330static uint32_t check_rw_coarse(\r
1331 MRCParams_t *mrc_params,\r
1332 uint32_t address)\r
1333{\r
1334 uint32_t result = 0;\r
1335 uint8_t first_run = 0;\r
1336\r
1337 if (mrc_params->hte_setup)\r
1338 {\r
1339 mrc_params->hte_setup = 0;\r
1340\r
1341 first_run = 1;\r
1342 select_hte(mrc_params);\r
1343 }\r
1344\r
1345 result = BasicWriteReadHTE(mrc_params, address, first_run, WRITE_TRAIN);\r
1346\r
1347 DPF(D_TRN, "check_rw_coarse result is %x\n", result);\r
1348 return result;\r
1349}\r
1350\r
1351// wr_level:\r
1352// POST_CODE[major] == 0x06\r
1353//\r
1354// This function will perform the Write Levelling algorithm (align WCLK and WDQS).\r
1355// This algorithm will act on each rank in each channel separately.\r
1356static void wr_level(\r
1357 MRCParams_t *mrc_params)\r
1358{\r
1359 uint8_t channel_i; // channel counter\r
1360 uint8_t rank_i; // rank counter\r
1361 uint8_t bl_i; // byte lane counter\r
1362 uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor\r
1363\r
1364#ifdef R2R_SHARING\r
1365 uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs\r
1366#ifndef BACKUP_WDQS\r
1367 uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs\r
1368#endif // BACKUP_WDQS\r
1369#endif // R2R_SHARING\r
1370\r
1371#ifdef BACKUP_WDQS\r
1372#else\r
1373 bool all_edges_found; // determines stop condition for CRS_WR_LVL\r
1374 uint32_t delay[NUM_BYTE_LANES]; // absolute PI value to be programmed on the byte lane\r
1375 // static makes it so the data is loaded in the heap once by shadow(), where\r
1376 // non-static copies the data onto the stack every time this function is called.\r
1377\r
1378 uint32_t address; // address to be checked during COARSE_WR_LVL\r
1379 RegDTR4 dtr4;\r
1380 RegDTR4 dtr4save;\r
1381#endif // BACKUP_WDQS\r
1382\r
1383 ENTERFN();\r
1384\r
1385 // wr_level starts\r
1386 post_code(0x06, 0x00);\r
1387\r
1388#ifdef R2R_SHARING\r
1389 // need to set "final_delay[][]" elements to "0"\r
1390 memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));\r
1391#endif // R2R_SHARING\r
1392 // loop through each enabled channel\r
1393 for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)\r
1394 {\r
1395 if (mrc_params->channel_enables & (1 << channel_i))\r
1396 {\r
1397 // perform WRITE LEVELING algorithm on a per rank basis\r
1398 for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)\r
1399 {\r
1400 if (mrc_params->rank_enables & (1 << rank_i))\r
1401 {\r
1402 // POST_CODE here indicates the current rank and channel being calibrated\r
1403 post_code(0x06, (0x10 + ((channel_i << 4) | rank_i)));\r
1404\r
1405#ifdef BACKUP_WDQS\r
1406 for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)\r
1407 {\r
1408 set_wdqs(channel_i, rank_i, bl_i, ddr_wdqs[PLATFORM_ID]);\r
1409 set_wdq(channel_i, rank_i, bl_i, (ddr_wdqs[PLATFORM_ID] - QRTR_CLK));\r
1410 }\r
1411#else\r
1412\r
1413 { // Begin product specific code\r
1414\r
1415 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1416 dram_init_command(DCMD_PREA(rank_i));\r
1417\r
1418 // enable Write Levelling Mode (EMRS1 w/ Write Levelling Mode Enable)\r
1419 dram_init_command(DCMD_MRS1(rank_i,0x0082));\r
1420\r
1421 // set ODT DRAM Full Time Termination disable in MCU\r
1422 dtr4.raw = dtr4save.raw = isbR32m(MCU, DTR4);\r
1423 dtr4.field.ODTDIS = 1;\r
1424 isbW32m(MCU, DTR4, dtr4.raw);\r
1425\r
1426 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1427 {\r
1428 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1429 (BIT28 | (0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1430 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Enable Sandy Bridge Mode (WDQ Tri-State) & Ensure 5 WDQS pulses during Write Leveling\r
1431 }\r
1432\r
1433 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (BIT16), (BIT16)); // Write Leveling Mode enabled in IO\r
1434 } // End product specific code\r
1435 // Initialise the starting delay to WCLK\r
1436 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1437 {\r
1438 { // Begin product specific code\r
1439 // CLK0 --> RK0\r
1440 // CLK1 --> RK1\r
1441 delay[bl_i] = get_wclk(channel_i, rank_i);\r
1442 } // End product specific code\r
1443 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1444 } // bl_i loop\r
1445 // now find the rising edge\r
1446 find_rising_edge(mrc_params, delay, channel_i, rank_i, false);\r
1447 { // Begin product specific code\r
1448 // disable Write Levelling Mode\r
1449 isbM32m(DDRPHY, CCDDR3RESETCTL + (DDRIOCCC_CH_OFFSET * channel_i), (0), (BIT16)); // Write Leveling Mode disabled in IO\r
1450\r
1451 for (bl_i = 0; bl_i < ((NUM_BYTE_LANES / bl_divisor) / 2); bl_i++)\r
1452 {\r
1453 isbM32m(DDRPHY, DQCTL + (DDRIODQ_BL_OFFSET * bl_i) + (DDRIODQ_CH_OFFSET * channel_i),\r
1454 ((0x1 << 8) | (0x1 << 6) | (0x1 << 4) | (0x1 << 2)),\r
1455 (BIT28 | (BIT9|BIT8) | (BIT7|BIT6) | (BIT5|BIT4) | (BIT3|BIT2))); // Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation\r
1456 } // bl_i loop\r
1457\r
1458 // restore original DTR4\r
1459 isbW32m(MCU, DTR4, dtr4save.raw);\r
1460\r
1461 // restore original value (Write Levelling Mode Disable)\r
1462 dram_init_command(DCMD_MRS1(rank_i, mrc_params->mrs1));\r
1463\r
1464 // perform a single PRECHARGE_ALL command to make DRAM state machine go to IDLE state\r
1465 dram_init_command(DCMD_PREA(rank_i));\r
1466 } // End product specific code\r
1467\r
1468 post_code(0x06, (0x30 + ((channel_i << 4) | rank_i)));\r
1469\r
1470 // COARSE WRITE LEVEL:\r
1471 // check that we're on the correct clock edge\r
1472\r
1473 // hte reconfiguration request\r
1474 mrc_params->hte_setup = 1;\r
1475\r
1476 // start CRS_WR_LVL with WDQS = WDQS + 128 PI\r
1477 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1478 {\r
1479 delay[bl_i] = get_wdqs(channel_i, rank_i, bl_i) + FULL_CLK;\r
1480 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1481 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1482 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1483 } // bl_i loop\r
1484\r
1485 // get an address in the targeted channel/rank\r
1486 address = get_addr(mrc_params, channel_i, rank_i);\r
1487 do\r
1488 {\r
1489 uint32_t coarse_result = 0x00;\r
1490 uint32_t coarse_result_mask = byte_lane_mask(mrc_params);\r
1491 all_edges_found = true; // assume pass\r
1492\r
1493#ifdef SIM\r
1494 // need restore memory to idle state as write can be in bad sync\r
1495 dram_init_command (DCMD_PREA(rank_i));\r
1496#endif\r
1497\r
1498 mrc_params->hte_setup = 1;\r
1499 coarse_result = check_rw_coarse(mrc_params, address);\r
1500\r
1501 // check for failures and margin the byte lane back 128 PI (1 CLK)\r
1502 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1503 {\r
1504 if (coarse_result & (coarse_result_mask << bl_i))\r
1505 {\r
1506 all_edges_found = false;\r
1507 delay[bl_i] -= FULL_CLK;\r
1508 set_wdqs(channel_i, rank_i, bl_i, delay[bl_i]);\r
1509 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1510 set_wdq(channel_i, rank_i, bl_i, (delay[bl_i] - QRTR_CLK));\r
1511 }\r
1512 } // bl_i loop\r
1513\r
1514 } while (!all_edges_found);\r
1515\r
1516#ifdef R2R_SHARING\r
1517 // increment "num_ranks_enabled"\r
1518 num_ranks_enabled++;\r
1519 // accumulate "final_delay[][]" values from "delay[]" values for rolling average\r
1520 for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)\r
1521 {\r
1522 final_delay[channel_i][bl_i] += delay[bl_i];\r
1523 set_wdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));\r
1524 // program WDQ timings based on WDQS (WDQ = WDQS - 32 PI)\r
1525 set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled) - QRTR_CLK);\r
1526 } // bl_i loop\r
1527#endif // R2R_SHARING\r
1528#endif // BACKUP_WDQS\r
1529\r
1530 } // if rank is enabled\r
1531 } // rank_i loop\r
1532 } // if channel is enabled\r
1533 } // channel_i loop\r
1534\r
1535 LEAVEFN();\r
1536 return;\r
1537}\r
1538\r
// rd_train:
// POST_CODE[major] == 0x07
//
// This function will perform the READ TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
// The idea here is to train the VREF and RDQS (and eventually RDQ) values to achieve maximum READ margins.
// The algorithm will first determine the X coordinate (RDQS setting).
// This is done by collapsing the VREF eye until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
// Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX, then average those; this will be the final X coordinate.
// The algorithm will then determine the Y coordinate (VREF setting).
// This is done by collapsing the RDQS eye until we find a minimum required VREF eye for RDQS_MIN and RDQS_MAX.
// Then we take the averages of the VREF eye at RDQS_MIN and RDQS_MAX, then average those; this will be the final Y coordinate.
// NOTE: this algorithm assumes the eye curves have a one-to-one relationship, meaning for each X the curve has only one Y and vice-a-versa.
static void rd_train(
    MRCParams_t *mrc_params)
{

#define MIN_RDQS_EYE 10 // in PI Codes
#define MIN_VREF_EYE 10 // in VREF Codes
#define RDQS_STEP 1 // how many RDQS codes to jump while margining
#define VREF_STEP 1 // how many VREF codes to jump while margining
#define VREF_MIN (0x00) // offset into "vref_codes[]" for minimum allowed VREF setting
#define VREF_MAX (0x3F) // offset into "vref_codes[]" for maximum allowed VREF setting
#define RDQS_MIN (0x00) // minimum RDQS delay value
#define RDQS_MAX (0x3F) // maximum RDQS delay value
#define B 0 // BOTTOM VREF
#define T 1 // TOP VREF
#define L 0 // LEFT RDQS
#define R 1 // RIGHT RDQS

  uint8_t channel_i; // channel counter
  uint8_t rank_i; // rank counter
  uint8_t bl_i; // byte lane counter
  uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor (x16 parts pair two lanes)
#ifdef BACKUP_RDQS
#else
  uint8_t side_x; // tracks LEFT/RIGHT approach vectors
  uint8_t side_y; // tracks BOTTOM/TOP approach vectors
  // NOTE(review): x_coordinate is per-rank but y_coordinate is not — VREF is
  // applied per channel/byte-lane only (set_vref() takes no rank argument),
  // so all ranks on a channel share one VREF setting.
  uint8_t x_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // X coordinate data (passing RDQS values) for approach vectors
  uint8_t y_coordinate[2/*side_x*/][2/*side_y*/][NUM_CHANNELS][NUM_BYTE_LANES]; // Y coordinate data (passing VREF values) for approach vectors
  uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // centered X (RDQS)
  uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES]; // centered Y (VREF)
  uint32_t address; // target address for "check_bls_ex()"
  uint32_t result; // result of "check_bls_ex()"
  uint32_t bl_mask; // byte lane mask for "result" checking
#ifdef R2R_SHARING
  uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
  uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
#endif // R2R_SHARING
#endif // BACKUP_RDQS
  // rd_train starts
  post_code(0x07, 0x00);

  ENTERFN();

#ifdef BACKUP_RDQS
  // BACKUP path: skip training and program fixed per-platform RDQS values.
  for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1<<channel_i))
    {
      for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1<<rank_i))
        {
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            set_rdqs(channel_i, rank_i, bl_i, ddr_rdqs[PLATFORM_ID]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#else
  // initialise x/y_coordinate arrays:
  // each of the four search vectors (L/R x B/T) starts at the extreme
  // corner of the RDQS/VREF search space and is walked inward.
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // x_coordinate:
            x_coordinate[L][B][channel_i][rank_i][bl_i] = RDQS_MIN;
            x_coordinate[R][B][channel_i][rank_i][bl_i] = RDQS_MAX;
            x_coordinate[L][T][channel_i][rank_i][bl_i] = RDQS_MIN;
            x_coordinate[R][T][channel_i][rank_i][bl_i] = RDQS_MAX;
            // y_coordinate:
            y_coordinate[L][B][channel_i][bl_i] = VREF_MIN;
            y_coordinate[R][B][channel_i][bl_i] = VREF_MIN;
            y_coordinate[L][T][channel_i][bl_i] = VREF_MAX;
            y_coordinate[R][T][channel_i][bl_i] = VREF_MAX;
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

  // initialise other variables
  bl_mask = byte_lane_mask(mrc_params);
  address = get_addr(mrc_params, 0, 0);

#ifdef R2R_SHARING
  // need to set "final_delay[][]" elements to "0"
  memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
#endif // R2R_SHARING

  // look for passing coordinates
  for (side_y = B; side_y <= T; side_y++)
  {
    for (side_x = L; side_x <= R; side_x++)
    {

      post_code(0x07, (0x10 + (side_y * 2) + (side_x)));

      // find passing values
      for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
      {
        if (mrc_params->channel_enables & (0x1 << channel_i))
        {
          for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
          {

            if (mrc_params->rank_enables & (0x1 << rank_i))
            {
              // set x/y_coordinate search starting settings
              for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
              {
                set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);
                set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);
              } // bl_i loop
              // get an address in the target channel/rank
              address = get_addr(mrc_params, channel_i, rank_i);

              // request HTE reconfiguration
              mrc_params->hte_setup = 1;

              // test the settings; loop until every byte lane passes
              // at the current (possibly adjusted) coordinates
              do
              {

                // result[07:00] == failing byte lane (MAX 8)
                result = check_bls_ex( mrc_params, address);

                // check for failures
                if (result & 0xFF)
                {
                  // at least 1 byte lane failed
                  for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
                  {
                    if (result & (bl_mask << bl_i))
                    {
                      // adjust the RDQS values accordingly:
                      // LEFT vector moves right, RIGHT vector moves left
                      if (side_x == L)
                      {
                        x_coordinate[L][side_y][channel_i][rank_i][bl_i] += RDQS_STEP;
                      }
                      else
                      {
                        x_coordinate[R][side_y][channel_i][rank_i][bl_i] -= RDQS_STEP;
                      }
                      // check that we haven't closed the RDQS_EYE too much
                      if ((x_coordinate[L][side_y][channel_i][rank_i][bl_i] > (RDQS_MAX - MIN_RDQS_EYE)) ||
                          (x_coordinate[R][side_y][channel_i][rank_i][bl_i] < (RDQS_MIN + MIN_RDQS_EYE))
                          ||
                          (x_coordinate[L][side_y][channel_i][rank_i][bl_i]
                              == x_coordinate[R][side_y][channel_i][rank_i][bl_i]))
                      {
                        // not enough RDQS margin available at this VREF
                        // update VREF values accordingly (squeeze the VREF eye inward)
                        if (side_y == B)
                        {
                          y_coordinate[side_x][B][channel_i][bl_i] += VREF_STEP;
                        }
                        else
                        {
                          y_coordinate[side_x][T][channel_i][bl_i] -= VREF_STEP;
                        }
                        // check that we haven't closed the VREF_EYE too much
                        if ((y_coordinate[side_x][B][channel_i][bl_i] > (VREF_MAX - MIN_VREF_EYE)) ||
                            (y_coordinate[side_x][T][channel_i][bl_i] < (VREF_MIN + MIN_VREF_EYE)) ||
                            (y_coordinate[side_x][B][channel_i][bl_i] == y_coordinate[side_x][T][channel_i][bl_i]))
                        {
                          // VREF_EYE collapsed below MIN_VREF_EYE:
                          // report the failing lane and post the error code
                          training_message(channel_i, rank_i, bl_i);
                          post_code(0xEE, (0x70 + (side_y * 2) + (side_x)));
                        }
                        else
                        {
                          // update the VREF setting
                          set_vref(channel_i, bl_i, y_coordinate[side_x][side_y][channel_i][bl_i]);
                          // reset the X coordinate to begin the search at the new VREF
                          x_coordinate[side_x][side_y][channel_i][rank_i][bl_i] =
                              (side_x == L) ? (RDQS_MIN) : (RDQS_MAX);
                        }
                      }
                      // update the RDQS setting
                      set_rdqs(channel_i, rank_i, bl_i, x_coordinate[side_x][side_y][channel_i][rank_i][bl_i]);
                    } // if bl_i failed
                  } // bl_i loop
                } // at least 1 byte lane failed
              } while (result & 0xFF);
            } // if rank is enabled
          } // rank_i loop
        } // if channel is enabled
      } // channel_i loop
    } // side_x loop
  } // side_y loop

  post_code(0x07, 0x20);

  // find final RDQS (X coordinate) & final VREF (Y coordinate)
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            uint32_t tempD1;
            uint32_t tempD2;

            // x_coordinate:
            DPF(D_INFO, "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n", rank_i, bl_i,
                x_coordinate[L][T][channel_i][rank_i][bl_i],
                x_coordinate[R][T][channel_i][rank_i][bl_i],
                x_coordinate[L][B][channel_i][rank_i][bl_i],
                x_coordinate[R][B][channel_i][rank_i][bl_i]);

            tempD1 = (x_coordinate[R][T][channel_i][rank_i][bl_i] + x_coordinate[L][T][channel_i][rank_i][bl_i]) / 2; // average the TOP side LEFT & RIGHT values
            tempD2 = (x_coordinate[R][B][channel_i][rank_i][bl_i] + x_coordinate[L][B][channel_i][rank_i][bl_i]) / 2; // average the BOTTOM side LEFT & RIGHT values
            x_center[channel_i][rank_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages

            // y_coordinate:
            DPF(D_INFO, "VREF R/L eye lane%d : %d-%d %d-%d\n", bl_i,
                y_coordinate[R][B][channel_i][bl_i],
                y_coordinate[R][T][channel_i][bl_i],
                y_coordinate[L][B][channel_i][bl_i],
                y_coordinate[L][T][channel_i][bl_i]);

            tempD1 = (y_coordinate[R][T][channel_i][bl_i] + y_coordinate[R][B][channel_i][bl_i]) / 2; // average the RIGHT side TOP & BOTTOM values
            tempD2 = (y_coordinate[L][T][channel_i][bl_i] + y_coordinate[L][B][channel_i][bl_i]) / 2; // average the LEFT side TOP & BOTTOM values
            y_center[channel_i][bl_i] = (uint8_t) ((tempD1 + tempD2) / 2); // average the above averages
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

#ifdef RX_EYE_CHECK
  // perform an eye check: margin each corner of the centred eye by
  // half the minimum eye requirement and re-test
  for (side_y=B; side_y<=T; side_y++)
  {
    for (side_x=L; side_x<=R; side_x++)
    {

      post_code(0x07, (0x30 + (side_y * 2) + (side_x)));

      // update the settings for the eye check
      for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
      {
        if (mrc_params->channel_enables & (1<<channel_i))
        {
          for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
          {
            if (mrc_params->rank_enables & (1<<rank_i))
            {
              for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
              {
                if (side_x == L)
                {
                  set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] - (MIN_RDQS_EYE / 2)));
                }
                else
                {
                  set_rdqs(channel_i, rank_i, bl_i, (x_center[channel_i][rank_i][bl_i] + (MIN_RDQS_EYE / 2)));
                }
                if (side_y == B)
                {
                  set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] - (MIN_VREF_EYE / 2)));
                }
                else
                {
                  set_vref(channel_i, bl_i, (y_center[channel_i][bl_i] + (MIN_VREF_EYE / 2)));
                }
              } // bl_i loop
            } // if rank is enabled
          } // rank_i loop
        } // if channel is enabled
      } // channel_i loop

      // request HTE reconfiguration
      mrc_params->hte_setup = 1;

      // check the eye
      // NOTE(review): this uses "address" left over from the last trained
      // channel/rank above, not a per-channel address — confirm intended.
      if (check_bls_ex( mrc_params, address) & 0xFF)
      {
        // one or more byte lanes failed
        post_code(0xEE, (0x74 + (side_x * 2) + (side_y)));
      }
    } // side_x loop
  } // side_y loop
#endif // RX_EYE_CHECK

  post_code(0x07, 0x40);

  // set final placements
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
#ifdef R2R_SHARING
          // increment "num_ranks_enabled"
          num_ranks_enabled++;
#endif // R2R_SHARING
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // x_coordinate:
#ifdef R2R_SHARING
            // rank-to-rank sharing: program the rolling average across ranks
            final_delay[channel_i][bl_i] += x_center[channel_i][rank_i][bl_i];
            set_rdqs(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
#else
            set_rdqs(channel_i, rank_i, bl_i, x_center[channel_i][rank_i][bl_i]);
#endif // R2R_SHARING
            // y_coordinate:
            set_vref(channel_i, bl_i, y_center[channel_i][bl_i]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#endif // BACKUP_RDQS
  LEAVEFN();
  return;
}
1882\r
// wr_train:
// POST_CODE[major] == 0x08
//
// This function will perform the WRITE TRAINING Algorithm on all channels/ranks/byte_lanes simultaneously to minimize execution time.
// The idea here is to train the WDQ timings to achieve maximum WRITE margins.
// The algorithm will start with WDQ at the current WDQ setting (tracks WDQS in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data patterns pass.
// This is because WDQS will be aligned to WCLK by the Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window of validity.
static void wr_train(
    MRCParams_t *mrc_params)
{

#define WDQ_STEP 1 // how many WDQ codes to jump while margining
#define L 0 // LEFT side loop value definition
#define R 1 // RIGHT side loop value definition

  uint8_t channel_i; // channel counter
  uint8_t rank_i; // rank counter
  uint8_t bl_i; // byte lane counter
  uint8_t bl_divisor = (mrc_params->channel_width == x16) ? 2 : 1; // byte lane divisor (x16 parts pair two lanes)
#ifdef BACKUP_WDQ
#else
  uint8_t side_i; // LEFT/RIGHT side indicator (0=L, 1=R)
  uint32_t tempD; // temporary DWORD
  uint32_t delay[2/*side_i*/][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES]; // 2 arrays, for L & R side passing delays
  uint32_t address; // target address for "check_bls_ex()"
  uint32_t result; // result of "check_bls_ex()"
  uint32_t bl_mask; // byte lane mask for "result" checking
#ifdef R2R_SHARING
  uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES]; // used to find placement for rank2rank sharing configs
  uint32_t num_ranks_enabled = 0; // used to find placement for rank2rank sharing configs
#endif // R2R_SHARING
#endif // BACKUP_WDQ

  // wr_train starts
  post_code(0x08, 0x00);

  ENTERFN();

#ifdef BACKUP_WDQ
  // BACKUP path: skip training and program fixed per-platform WDQ values.
  for (channel_i=0; channel_i<NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1<<channel_i))
    {
      for (rank_i=0; rank_i<NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1<<rank_i))
        {
          for (bl_i=0; bl_i<(NUM_BYTE_LANES/bl_divisor); bl_i++)
          {
            set_wdq(channel_i, rank_i, bl_i, ddr_wdq[PLATFORM_ID]);
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#else
  // initialise "delay": the L/R starting points bracket the nominal
  // WDQ position (WDQS - 1/4 CLK) by +/- 1/4 CLK each side
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {
            // want to start with WDQ = (WDQS - QRTR_CLK) +/- QRTR_CLK
            tempD = get_wdqs(channel_i, rank_i, bl_i) - QRTR_CLK;
            delay[L][channel_i][rank_i][bl_i] = tempD - QRTR_CLK;
            delay[R][channel_i][rank_i][bl_i] = tempD + QRTR_CLK;
          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop

  // initialise other variables
  bl_mask = byte_lane_mask(mrc_params);
  address = get_addr(mrc_params, 0, 0);

#ifdef R2R_SHARING
  // need to set "final_delay[][]" elements to "0"
  memset((void *) (final_delay), 0x00, (size_t) sizeof(final_delay));
#endif // R2R_SHARING

  // start algorithm on the LEFT side and train each channel/bl until no failures are observed, then repeat for the RIGHT side.
  for (side_i = L; side_i <= R; side_i++)
  {
    post_code(0x08, (0x10 + (side_i)));

    // set starting values
    for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
    {
      if (mrc_params->channel_enables & (1 << channel_i))
      {
        for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
        {
          if (mrc_params->rank_enables & (1 << rank_i))
          {
            for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
            {
              set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);
            } // bl_i loop
          } // if rank is enabled
        } // rank_i loop
      } // if channel is enabled
    } // channel_i loop

    // find passing values
    for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
    {
      if (mrc_params->channel_enables & (0x1 << channel_i))
      {
        for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
        {
          if (mrc_params->rank_enables & (0x1 << rank_i))
          {
            // get an address in the target channel/rank
            address = get_addr(mrc_params, channel_i, rank_i);

            // request HTE reconfiguration
            mrc_params->hte_setup = 1;

            // check the settings; walk failing lanes inward until all pass
            do
            {

#ifdef SIM
              // need restore memory to idle state as write can be in bad sync
              dram_init_command (DCMD_PREA(rank_i));
#endif

              // result[07:00] == failing byte lane (MAX 8)
              result = check_bls_ex( mrc_params, address);
              // check for failures
              if (result & 0xFF)
              {
                // at least 1 byte lane failed
                for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
                {
                  if (result & (bl_mask << bl_i))
                  {
                    // LEFT edge moves right, RIGHT edge moves left
                    if (side_i == L)
                    {
                      delay[L][channel_i][rank_i][bl_i] += WDQ_STEP;
                    }
                    else
                    {
                      delay[R][channel_i][rank_i][bl_i] -= WDQ_STEP;
                    }
                    // check for algorithm failure (edges met: eye fully collapsed)
                    if (delay[L][channel_i][rank_i][bl_i] != delay[R][channel_i][rank_i][bl_i])
                    {
                      // margin available, update delay setting
                      set_wdq(channel_i, rank_i, bl_i, delay[side_i][channel_i][rank_i][bl_i]);
                    }
                    else
                    {
                      // no margin available, notify the user and halt
                      training_message(channel_i, rank_i, bl_i);
                      post_code(0xEE, (0x80 + side_i));
                    }
                  } // if bl_i failed
                } // bl_i loop
              } // at least 1 byte lane failed
            } while (result & 0xFF); // stop when all byte lanes pass
          } // if rank is enabled
        } // rank_i loop
      } // if channel is enabled
    } // channel_i loop
  } // side_i loop

  // program WDQ to the middle of passing window
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
#ifdef R2R_SHARING
          // increment "num_ranks_enabled"
          num_ranks_enabled++;
#endif // R2R_SHARING
          for (bl_i = 0; bl_i < (NUM_BYTE_LANES / bl_divisor); bl_i++)
          {

            DPF(D_INFO, "WDQ eye rank%d lane%d : %d-%d\n", rank_i, bl_i,
                delay[L][channel_i][rank_i][bl_i],
                delay[R][channel_i][rank_i][bl_i]);

            // midpoint of the passing L..R window
            tempD = (delay[R][channel_i][rank_i][bl_i] + delay[L][channel_i][rank_i][bl_i]) / 2;

#ifdef R2R_SHARING
            // rank-to-rank sharing: program the rolling average across ranks
            final_delay[channel_i][bl_i] += tempD;
            set_wdq(channel_i, rank_i, bl_i, ((final_delay[channel_i][bl_i]) / num_ranks_enabled));
#else
            set_wdq(channel_i, rank_i, bl_i, tempD);
#endif // R2R_SHARING

          } // bl_i loop
        } // if rank is enabled
      } // rank_i loop
    } // if channel is enabled
  } // channel_i loop
#endif // BACKUP_WDQ
  LEAVEFN();
  return;
}
2094\r
// Wrapper for jedec initialisation routine:
// adapts jedec_init() to the zero-extra-argument task signature used
// by the MemInit() dispatch table.
static void perform_jedec_init(
    MRCParams_t *mrc_params)
{
  jedec_init(mrc_params, 0); // second argument fixed at 0 here — presumably the default init mode; confirm against jedec_init()
}
2101\r
// Configure DDRPHY for Auto-Refresh, Periodic Compensations,
// Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down.
// For each enabled channel: enables periodic RCOMP, programs the
// DIFFAMP/ODT override fields per byte-lane pair, and issues a ZQCS
// calibration command to each enabled rank.
static void set_auto_refresh(
    MRCParams_t *mrc_params)
{
  uint32_t channel_i;
  uint32_t rank_i;
  uint32_t bl_i;
  // x16 divisor deliberately disabled here (always 1) — see commented-out expression
  uint32_t bl_divisor = /*(mrc_params->channel_width==x16)?2:*/1;
  uint32_t tempD;

  ENTERFN();

  // enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
  for (channel_i = 0; channel_i < NUM_CHANNELS; channel_i++)
  {
    if (mrc_params->channel_enables & (1 << channel_i))
    {
      // Enable Periodic RCOMPS
      isbM32m(DDRPHY, CMPCTRL, (BIT1), (BIT1));


      // Enable Dynamic DiffAmp & Set Read ODT Value
      switch (mrc_params->rd_odt_value)
      {
        case 0: tempD = 0x3F; break; // OFF
        default: tempD = 0x00; break; // Auto
      } // rd_odt_value switch

      // program both byte lanes (B0/B1) of each lane-pair register block
      for (bl_i=0; bl_i<((NUM_BYTE_LANES/bl_divisor)/2); bl_i++)
      {
        isbM32m(DDRPHY, (B0OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),
            ((0x00<<16)|(tempD<<10)),
            ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10))); // Override: DIFFAMP, ODT

        isbM32m(DDRPHY, (B1OVRCTL + (bl_i * DDRIODQ_BL_OFFSET) + (channel_i * DDRIODQ_CH_OFFSET)),
            ((0x00<<16)|(tempD<<10)),
            ((BIT21|BIT20|BIT19|BIT18|BIT17|BIT16)|(BIT15|BIT14|BIT13|BIT12|BIT11|BIT10)));// Override: DIFFAMP, ODT
      } // bl_i loop

      // Issue ZQCS command (short ZQ calibration) to each enabled rank
      for (rank_i = 0; rank_i < NUM_RANKS; rank_i++)
      {
        if (mrc_params->rank_enables & (1 << rank_i))
        {
          dram_init_command(DCMD_ZQCS(rank_i));
        } // if rank_i enabled
      } // rank_i loop

    } // if channel_i enabled
  } // channel_i loop

  clear_pointers();

  LEAVEFN();
  return;
}
2159\r
// Depending on configuration enables ECC support.
// Available memory size is decreased (one eighth is reserved for the
// ECC syndrome), and memory is initialised with zeros in order to
// clear error status. Address mode 2 is forced.
static void ecc_enable(
    MRCParams_t *mrc_params)
{
  RegDRP Drp;
  RegDSCH Dsch;
  RegDECCCTRL Ctr;

  // no-op unless ECC was requested
  if (mrc_params->ecc_enables == 0) return;

  ENTERFN();

  // Configuration required in ECC mode
  Drp.raw = isbR32m(MCU, DRP);
  Drp.field.addressMap = 2;
  Drp.field.split64 = 1;
  isbW32m(MCU, DRP, Drp.raw);

  // Disable new request bypass
  Dsch.raw = isbR32m(MCU, DSCH);
  Dsch.field.NEWBYPDIS = 1;
  isbW32m(MCU, DSCH, Dsch.raw);

  // Enable ECC: single-bit detect, double-bit detect, check-bit generation
  Ctr.raw = 0;
  Ctr.field.SBEEN = 1;
  Ctr.field.DBEEN = 1;
  Ctr.field.ENCBGEN = 1;
  isbW32m(MCU, DECCCTRL, Ctr.raw);

#ifdef SIM
  // Read back to be sure writing took place
  Ctr.raw = isbR32m(MCU, DECCCTRL);
#endif

  // Assume 8 bank memory, one bank is gone for ECC
  mrc_params->mem_size -= mrc_params->mem_size / 8;

  // For S3 resume memory content has to be preserved
  if (mrc_params->boot_mode != bmS3)
  {
    // zero-fill memory through the HTE so ECC check bits start consistent
    select_hte(mrc_params);
    HteMemInit(mrc_params, MrcMemInit, MrcHaltHteEngineOnError);
    select_memory_manager(mrc_params);
  }

  LEAVEFN();
  return;
}
2211\r
// Lock MCU registers at the end of initialisation sequence.
// Hands PRI ownership to the MEMORY_MANAGER and sets the DRP/REUT
// lock bits so the configuration cannot be changed afterwards.
static void lock_registers(
    MRCParams_t *mrc_params)
{
  RegDCO Dco;

  ENTERFN();

  Dco.raw = isbR32m(MCU, DCO);
  Dco.field.PMIDIS = 0; //0 - PRI enabled
  Dco.field.PMICTL = 0; //0 - PRI owned by MEMORY_MANAGER
  Dco.field.DRPLOCK = 1; // lock DRAM parameter registers
  Dco.field.REUTLOCK = 1; // lock REUT (test engine) registers
  isbW32m(MCU, DCO, Dco.raw);

  LEAVEFN();

}
2230\r
2231#ifdef MRC_SV\r
2232\r
// cache write back invalidate:
// execute the x86 WBINVD instruction (flush dirty lines to memory,
// then invalidate the whole cache). Syntax differs per toolchain.
static void asm_wbinvd(void)
{
#if defined (SIM) || defined (GCC)
  // GCC / simulator build: AT&T-style inline asm
  asm(
      "wbinvd;"
  );
#else
  // MSVC-style inline asm
  __asm wbinvd;
#endif
}
2244\r
// cache invalidate:
// execute the x86 INVD instruction (invalidate caches WITHOUT writing
// dirty lines back — any unwritten data is discarded).
static void asm_invd(void)
{
#if defined (SIM) || defined (GCC)
  // GCC / simulator build: AT&T-style inline asm
  asm(
      "invd;"
  );
#else
  // MSVC-style inline asm
  __asm invd;
#endif
}
2256\r
2257\r
2258static void cpu_read(void)\r
2259{\r
2260 uint32_t adr, dat, limit;\r
2261\r
2262 asm_invd();\r
2263\r
2264 limit = 8 * 1024;\r
2265 for (adr = 0; adr < limit; adr += 4)\r
2266 {\r
2267 dat = *(uint32_t*) adr;\r
2268 if ((adr & 0x0F) == 0)\r
2269 {\r
2270 DPF(D_INFO, "\n%x : ", adr);\r
2271 }\r
2272 DPF(D_INFO, "%x ", dat);\r
2273 }\r
2274 DPF(D_INFO, "\n");\r
2275\r
2276 DPF(D_INFO, "CPU read done\n");\r
2277}\r
2278\r
2279\r
2280static void cpu_write(void)\r
2281{\r
2282 uint32_t adr, limit;\r
2283\r
2284 limit = 8 * 1024;\r
2285 for (adr = 0; adr < limit; adr += 4)\r
2286 {\r
2287 *(uint32_t*) adr = 0xDEAD0000 + adr;\r
2288 }\r
2289\r
2290 asm_wbinvd();\r
2291\r
2292 DPF(D_INFO, "CPU write done\n");\r
2293}\r
2294\r
2295\r
2296static void cpu_memory_test(\r
2297 MRCParams_t *mrc_params)\r
2298{\r
2299 uint32_t result = 0;\r
2300 uint32_t val, dat, adr, adr0, step, limit;\r
2301 uint64_t my_tsc;\r
2302\r
2303 ENTERFN();\r
2304\r
2305 asm_invd();\r
2306\r
2307 adr0 = 1 * 1024 * 1024;\r
2308 limit = 256 * 1024 * 1024;\r
2309\r
2310 for (step = 0; step <= 4; step++)\r
2311 {\r
2312 DPF(D_INFO, "Mem test step %d starting from %xh\n", step, adr0);\r
2313\r
2314 my_tsc = read_tsc();\r
2315 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2316 {\r
2317 if (step == 0) dat = adr;\r
2318 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2319 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2320 else if (step == 3) dat = 0x5555AAAA;\r
2321 else if (step == 4) dat = 0xAAAA5555;\r
2322\r
2323 *(uint32_t*) adr = dat;\r
2324 }\r
2325 DPF(D_INFO, "Write time %llXh\n", read_tsc() - my_tsc);\r
2326\r
2327 my_tsc = read_tsc();\r
2328 for (adr = adr0; adr < limit; adr += sizeof(uint32_t))\r
2329 {\r
2330 if (step == 0) dat = adr;\r
2331 else if (step == 1) dat = (1 << ((adr >> 2) & 0x1f));\r
2332 else if (step == 2) dat = ~(1 << ((adr >> 2) & 0x1f));\r
2333 else if (step == 3) dat = 0x5555AAAA;\r
2334 else if (step == 4) dat = 0xAAAA5555;\r
2335\r
2336 val = *(uint32_t*) adr;\r
2337\r
2338 if (val != dat)\r
2339 {\r
2340 DPF(D_INFO, "%x vs. %x@%x\n", dat, val, adr);\r
2341 result = adr|BIT31;\r
2342 }\r
2343 }\r
2344 DPF(D_INFO, "Read time %llXh\n", read_tsc() - my_tsc);\r
2345 }\r
2346\r
2347 DPF( D_INFO, "Memory test result %x\n", result);\r
2348 LEAVEFN();\r
2349}\r
2350#endif // MRC_SV\r
2351\r
2352\r
2353// Execute memory test, if error dtected it is\r
2354// indicated in mrc_params->status.\r
2355static void memory_test(\r
2356 MRCParams_t *mrc_params)\r
2357{\r
2358 uint32_t result = 0;\r
2359\r
2360 ENTERFN();\r
2361\r
2362 select_hte(mrc_params);\r
2363 result = HteMemInit(mrc_params, MrcMemTest, MrcHaltHteEngineOnError);\r
2364 select_memory_manager(mrc_params);\r
2365\r
2366 DPF(D_INFO, "Memory test result %x\n", result);\r
2367 mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);\r
2368 LEAVEFN();\r
2369}\r
2370\r
2371\r
2372// Force same timings as with backup settings\r
2373static void static_timings(\r
2374 MRCParams_t *mrc_params)\r
2375\r
2376{\r
2377 uint8_t ch, rk, bl;\r
2378\r
2379 for (ch = 0; ch < NUM_CHANNELS; ch++)\r
2380 {\r
2381 for (rk = 0; rk < NUM_RANKS; rk++)\r
2382 {\r
2383 for (bl = 0; bl < NUM_BYTE_LANES; bl++)\r
2384 {\r
2385 set_rcvn(ch, rk, bl, 498); // RCVN\r
2386 set_rdqs(ch, rk, bl, 24); // RDQS\r
2387 set_wdqs(ch, rk, bl, 292); // WDQS\r
2388 set_wdq( ch, rk, bl, 260); // WDQ\r
2389 if (rk == 0)\r
2390 {\r
2391 set_vref(ch, bl, 32); // VREF (RANK0 only)\r
2392 }\r
2393 }\r
2394 set_wctl(ch, rk, 217); // WCTL\r
2395 }\r
2396 set_wcmd(ch, 220); // WCMD\r
2397 }\r
2398\r
2399 return;\r
2400}\r
2401\r
//
// Initialise system memory.
//
// Walks the ordered init[] step table, executing each step whose
// boot-path mask includes the current boot mode.  Each step emits a
// POST code (major = high byte, minor = low byte of post_code) before
// running, and its execution time is logged.  On non-cold boots, a
// DDR speed mismatch against the stored timings forces a full cold
// boot so the training algorithms re-run.
//
void MemInit(
  MRCParams_t *mrc_params)
{
  // Step table: { POST code, boot paths the step applies to, handler }.
  // Order matters: the MCU/DDRPHY setup, JEDEC init, and the training
  // algorithms (RCVN/WR_LEVEL/RD_TRAIN/WR_TRAIN) must run in sequence.
  static const MemInit_t init[] =
  {
    { 0x0101, bmCold|bmFast|bmWarm|bmS3, clear_self_refresh }, //0
    { 0x0200, bmCold|bmFast|bmWarm|bmS3, prog_ddr_timing_control }, //1 initialise the MCU
    { 0x0103, bmCold|bmFast                 , prog_decode_before_jedec }, //2
    { 0x0104, bmCold|bmFast                 , perform_ddr_reset }, //3
    { 0x0300, bmCold|bmFast       |bmS3, ddrphy_init }, //4 initialise the DDRPHY
    { 0x0400, bmCold|bmFast                 , perform_jedec_init }, //5 perform JEDEC initialisation of DRAMs
    { 0x0105, bmCold|bmFast                 , set_ddr_init_complete }, //6
    { 0x0106,        bmFast|bmWarm|bmS3, restore_timings }, //7
    { 0x0106, bmCold                        , default_timings }, //8
    { 0x0500, bmCold                        , rcvn_cal }, //9 perform RCVN_CAL algorithm
    { 0x0600, bmCold                        , wr_level }, //10 perform WR_LEVEL algorithm
    { 0x0120, bmCold                        , prog_page_ctrl }, //11
    { 0x0700, bmCold                        , rd_train }, //12 perform RD_TRAIN algorithm
    { 0x0800, bmCold                        , wr_train }, //13 perform WR_TRAIN algorithm
    { 0x010B, bmCold                        , store_timings }, //14
    { 0x010C, bmCold|bmFast|bmWarm|bmS3, enable_scrambling }, //15
    { 0x010D, bmCold|bmFast|bmWarm|bmS3, prog_ddr_control }, //16
    { 0x010E, bmCold|bmFast|bmWarm|bmS3, prog_dra_drb }, //17
    { 0x010F,               bmWarm|bmS3, perform_wake }, //18
    { 0x0110, bmCold|bmFast|bmWarm|bmS3, change_refresh_period }, //19
    { 0x0111, bmCold|bmFast|bmWarm|bmS3, set_auto_refresh }, //20
    { 0x0112, bmCold|bmFast|bmWarm|bmS3, ecc_enable }, //21
    { 0x0113, bmCold|bmFast                 , memory_test }, //22
    { 0x0114, bmCold|bmFast|bmWarm|bmS3, lock_registers } //23 set init done
  };

  uint32_t i;

  ENTERFN();

  DPF(D_INFO, "Meminit build %s %s\n", __DATE__, __TIME__);

  // MRC started
  post_code(0x01, 0x00);

  // A frequency change invalidates any stored timings, so demote
  // fast/warm/S3 paths to a full cold-boot training run.
  if (mrc_params->boot_mode != bmCold)
  {
    if (mrc_params->ddr_speed != mrc_params->timings.ddr_speed)
    {
      // full training required as frequency changed
      mrc_params->boot_mode = bmCold;
    }
  }

  for (i = 0; i < MCOUNT(init); i++)
  {
    uint64_t my_tsc;

#ifdef MRC_SV
    // Simulation/validation builds only: once past the timing-store
    // step (i > 14) drop into an interactive menu.  Several cases
    // rewind the loop index 'i' so a chosen step re-runs on the next
    // pass; 'mylop' loops the menu until 'c' (continue) or 'j'.
    if (mrc_params->menu_after_mrc && i > 14)
    {
      uint8_t ch;

      mylop:

      DPF(D_INFO, "-- c - continue --\n");
      DPF(D_INFO, "-- j - move to jedec init --\n");
      DPF(D_INFO, "-- m - memory test --\n");
      DPF(D_INFO, "-- r - cpu read --\n");
      DPF(D_INFO, "-- w - cpu write --\n");
      DPF(D_INFO, "-- b - hte base test --\n");
      DPF(D_INFO, "-- g - hte extended test --\n");

      ch = mgetc();
      switch (ch)
      {
      case 'c':
        break;
      case 'j': //move to jedec init
        i = 5;
        break;

      // 'M'/'N': repeated stress loop until a key is pressed.
      // 'M' also runs the HTE memory test each iteration and prints
      // progress every pass; 'N' prints progress every 256 passes.
      case 'M':
      case 'N':
      {
        uint32_t n, res, cnt=0;

        for(n=0; mgetch()==0; n++)
        {
          if( ch == 'M' || n % 256 == 0)
          {
            DPF(D_INFO, "n=%d e=%d\n", n, cnt);
          }

          res = 0;

          if( ch == 'M')
          {
            memory_test(mrc_params);
            res |= mrc_params->status;
          }

          // Extended BL stress on rank 0 (base address 0).
          mrc_params->hte_setup = 1;
          res |= check_bls_ex(mrc_params, 0x00000000);
          res |= check_bls_ex(mrc_params, 0x00000000);
          res |= check_bls_ex(mrc_params, 0x00000000);
          res |= check_bls_ex(mrc_params, 0x00000000);

          // Repeat on rank 1 if enabled (upper address region).
          if( mrc_params->rank_enables & 2)
          {
            mrc_params->hte_setup = 1;
            res |= check_bls_ex(mrc_params, 0x40000000);
            res |= check_bls_ex(mrc_params, 0x40000000);
            res |= check_bls_ex(mrc_params, 0x40000000);
            res |= check_bls_ex(mrc_params, 0x40000000);
          }

          if( res != 0)
          {
            DPF(D_INFO, "###########\n");
            DPF(D_INFO, "#\n");
            DPF(D_INFO, "# Error count %d\n", ++cnt);
            DPF(D_INFO, "#\n");
            DPF(D_INFO, "###########\n");
          }

        } // for

        select_memory_manager(mrc_params);
      }
      goto mylop;
      case 'm':
        memory_test(mrc_params);
        goto mylop;
      case 'n':
        cpu_memory_test(mrc_params);
        goto mylop;

      case 'l':
        // Toggle debug-log mask bits from a digit key.
        ch = mgetc();
        if (ch <= '9') DpfPrintMask ^= (ch - '0') << 3;
        DPF(D_INFO, "Log mask %x\n", DpfPrintMask);
        goto mylop;
      case 'p':
        print_timings(mrc_params);
        goto mylop;
      case 'R':
        rd_train(mrc_params);
        goto mylop;
      case 'W':
        wr_train(mrc_params);
        goto mylop;

      case 'r':
        cpu_read();
        goto mylop;
      case 'w':
        cpu_write();
        goto mylop;

      case 'g':
      {
        uint32_t result;
        select_hte(mrc_params);
        mrc_params->hte_setup = 1;
        result = check_bls_ex(mrc_params, 0);
        DPF(D_INFO, "Extended test result %x\n", result);
        select_memory_manager(mrc_params);
      }
      goto mylop;
      case 'b':
      {
        uint32_t result;
        select_hte(mrc_params);
        mrc_params->hte_setup = 1;
        result = check_rw_coarse(mrc_params, 0);
        DPF(D_INFO, "Base test result %x\n", result);
        select_memory_manager(mrc_params);
      }
      goto mylop;
      case 'B':
        select_hte(mrc_params);
        HteMemOp(0x2340, 1, 1);
        select_memory_manager(mrc_params);
        goto mylop;

      case '3':
      {
        // Exercise the suspend/resume path: disable dynamic
        // self-refresh and the master DLL, issue the sideband
        // suspend command, then restart the init sequence from
        // step 0 in S3 mode after a keypress.
        RegDPMC0 DPMC0reg;

        DPF( D_INFO, "===>> Start suspend\n");
        isbR32m(MCU, DSTAT);

        DPMC0reg.raw = isbR32m(MCU, DPMC0);
        DPMC0reg.field.DYNSREN = 0;
        DPMC0reg.field.powerModeOpCode = 0x05;  // Disable Master DLL
        isbW32m(MCU, DPMC0, DPMC0reg.raw);

        // Should be off for negative test case verification
        #if 1
        Wr32(MMIO, PCIADDR(0,0,0,SB_PACKET_REG),
            (uint32_t)SB_COMMAND(SB_SUSPEND_CMND_OPCODE, MCU, 0));
        #endif

        DPF( D_INFO, "press key\n");
        mgetc();
        DPF( D_INFO, "===>> Start resume\n");
        isbR32m(MCU, DSTAT);

        mrc_params->boot_mode = bmS3;
        i = 0;
      }

      } // switch

    } // if( menu
#endif //MRC_SV

    // Execute the step only on the boot paths it is registered for.
    if (mrc_params->boot_mode & init[i].boot_path)
    {
      uint8_t major = init[i].post_code >> 8 & 0xFF;
      uint8_t minor = init[i].post_code >> 0 & 0xFF;
      post_code(major, minor);

      my_tsc = read_tsc();
      init[i].init_fn(mrc_params);
      // NOTE(review): unlike other DPF calls this format string has no
      // trailing '\n' — confirm whether that is intentional.
      DPF(D_TIME, "Execution time %llX", read_tsc() - my_tsc);
    }
  }

  // display the timings
  print_timings(mrc_params);

  // MRC is complete.
  post_code(0x01, 0xFF);

  LEAVEFN();
  return;
}