// SPDX-License-Identifier: GPL-2.0-only
/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <aneesh@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <memory/jedec_ddr.h>

#include "emif.h"
#include "of_memory.h"

/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:		Whether the DDR devices attached to this EMIF
 *			instance are exactly same as that on EMIF1. In
 *			this case we can save some memory and processing
 * @temperature_level:	Maximum temperature of LPDDR2 devices attached
 *			to this EMIF - read from MR4 register. If there
 *			are two devices attached to this EMIF, this
 *			value is the maximum of the two temperature
 *			levels.
 * @node:		node in the device list
 * @base:		base address of memory-mapped IO registers.
 * @dev:		device pointer.
 * @addressing:		table with addressing information from the spec
 * @regs_cache:		An array of 'struct emif_regs' that stores
 *			calculated register values for different
 *			frequencies, to avoid re-calculating them on
 *			each DVFS transition.
 * @curr_regs:		The set of register values used in the last
 *			frequency change (i.e. corresponding to the
 *			frequency in effect at the moment)
 * @plat_data:		Pointer to saved platform data.
 * @debugfs_root:	dentry to the root folder for EMIF in debugfs
 * @np_ddr:		Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	const struct lpddr2_addressing	*addressing;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};

static struct emif_data *emif1;
static spinlock_t	emif_lock;
static unsigned long	irq_state;
static u32		t_ck;	/* DDR clock period in ps */
static LIST_HEAD(device_list);

#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
	struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}

static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif = s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
	}

	return 0;
}

static int emif_regdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_regdump_show, inode->i_private);
}

static const struct file_operations emif_regdump_fops = {
	.open		= emif_regdump_open,
	.read		= seq_read,
	.release	= single_release,
};

static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

static int emif_mr4_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_mr4_show, inode->i_private);
}

static const struct file_operations emif_mr4_fops = {
	.open		= emif_mr4_open,
	.read		= seq_read,
	.release	= single_release,
};

static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	struct dentry	*dentry;
	int		ret;

	dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
	if (!dentry) {
		ret = -ENOMEM;
		goto err0;
	}
	emif->debugfs_root = dentry;

	dentry = debugfs_create_file("regcache_dump", S_IRUGO,
			emif->debugfs_root, emif, &emif_regdump_fops);
	if (!dentry) {
		ret = -ENOMEM;
		goto err1;
	}

	dentry = debugfs_create_file("mr4", S_IRUGO,
			emif->debugfs_root, emif, &emif_mr4_fops);
	if (!dentry) {
		ret = -ENOMEM;
		goto err1;
	}

	return 0;
err1:
	debugfs_remove_recursive(emif->debugfs_root);
err0:
	return ret;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}
#else
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif

/*
 * Calculate the period of DDR clock from frequency value
 */
static void set_ddr_clk_period(u32 freq)
{
	/* Divide 10^12 by frequency to get period in ps */
	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
}

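/*
 * Illustrative example (values assumed, not taken from a particular board):
 * for freq = 400000000 (400 MHz), t_ck = DIV_ROUND_UP_ULL(10^12, 4 * 10^8)
 * = 2500 ps, i.e. a 2.5 ns DDR clock period that all later cycle-count
 * conversions divide against.
 */
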
/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
	width = width == 0 ? 32 : 16;

	return width;
}

/*
 * Get the CL from SDRAM_CONFIG register
 */
static u32 get_cl(struct emif_data *emif)
{
	u32		cl;
	void __iomem	*base = emif->base;

	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;

	return cl;
}

static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is to not allow power-down state and, therefore, to not have set
	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
	    (EMIF_LP_MODE_PWR_DN == lpmode)) {
		WARN_ONCE(1,
			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by "
			  "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}

static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And frequency change is requested
	 * - And OCP access is requested
	 * then it causes an unstable clock on the DDR interface.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}

/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32 index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}

/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
		u32 freq)
{
	u32				i, min, max, freq_nearest;
	const struct lpddr2_timings	*timings = NULL;
	const struct lpddr2_timings	*timings_arr = emif->plat_data->timings;
	struct device			*dev = emif->dev;

	/* Start with a very high frequency - 1GHz */
	freq_nearest = 1000000000;

	/*
	 * Find the timings table such that:
	 *  1. the frequency range covers the required frequency(safe) AND
	 *  2. the max_freq is closest to the required frequency(optimal)
	 */
	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
		max = timings_arr[i].max_freq;
		min = timings_arr[i].min_freq;
		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
			freq_nearest = max;
			timings = &timings_arr[i];
		}
	}

	if (!timings)
		dev_err(dev, "%s: couldn't find timings for - %dHz\n",
			__func__, freq);

	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
		__func__, freq, freq_nearest);

	return timings;
}

static u32 get_sdram_ref_ctrl_shdw(u32 freq,
		const struct lpddr2_addressing *addressing)
{
	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;

	/* Scale down frequency and t_refi to avoid overflow */
	freq_khz = freq / 1000;
	t_refi = addressing->tREFI_ns / 100;

	/*
	 * refresh rate to be set is 'tREFI(in us) * freq in MHz
	 * division by 10000 to account for change in units
	 */
	val = t_refi * freq_khz / 10000;
	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;

	return ref_ctrl_shdw;
}

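/*
 * Worked example for the refresh-rate conversion above (illustrative values,
 * not from a specific addressing table): with tREFI_ns = 7800 and
 * freq = 400000000, t_refi = 78 and freq_khz = 400000, so the refresh rate
 * field becomes 78 * 400000 / 10000 = 3120 DDR cycles between refreshes.
 */
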
static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 |= val << T_WTR_SHIFT;

	if (addressing->num_banks == B8)
		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
	else
		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
	tim1 |= (val - 1) << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
	tim1 |= val << T_RC_SHIFT;

	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
	tim1 |= (val - 1) << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
	tim1 |= val << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
	tim1 |= val << T_RP_SHIFT;

	return tim1;
}

static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 = val << T_WTR_SHIFT;

	/*
	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
	 * to tFAW for de-rating
	 */
	if (addressing->num_banks == B8) {
		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
	} else {
		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
		val = max(min_tck->tRRD, val) - 1;
	}
	tim1 |= val << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
	tim1 |= (val - 1) << T_RC_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
	val = max(min_tck->tRASmin, val) - 1;
	tim1 |= val << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
	tim1 |= (val - 1) << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
	tim1 |= (val - 1) << T_RP_SHIFT;

	return tim1;
}

static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type)
{
	u32 tim2 = 0, val = 0;

	val = min_tck->tCKE - 1;
	tim2 |= val << T_CKE_SHIFT;

	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
	tim2 |= val << T_RTP_SHIFT;

	/* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */
	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
	tim2 |= val << T_XSNR_SHIFT;

	/* XSRD same as XSNR for LPDDR2 */
	tim2 |= val << T_XSRD_SHIFT;

	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
	tim2 |= val << T_XP_SHIFT;

	return tim2;
}

static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type, u32 ip_rev, u32 derated)
{
	u32 tim3 = 0, val = 0, t_dqsck;

	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
	val = val > 0xF ? 0xF : val;
	tim3 |= val << T_RAS_MAX_SHIFT;

	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
	tim3 |= val << T_RFC_SHIFT;

	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
			timings->tDQSCK_max_derated : timings->tDQSCK_max;
	if (ip_rev == EMIF_4D5)
		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
	else
		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;

	tim3 |= val << T_TDQSCKMAX_SHIFT;

	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
	tim3 |= val << ZQ_ZQCS_SHIFT;

	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
	val = max(min_tck->tCKESR, val) - 1;
	tim3 |= val << T_CKESR_SHIFT;

	if (ip_rev == EMIF_4D5) {
		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;

		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
		tim3 |= val << T_PDLL_UL_SHIFT;
	}

	return tim3;
}

static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;

	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}

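/*
 * Rough illustration of the ZQCS interval field above, assuming
 * EMIF_ZQCS_INTERVAL_US = 50000 (50 ms) and tREFI_ns = 7800: the field is
 * programmed to 50000 * 1000 / 7800 ~= 6410 refresh periods between short
 * calibration commands.
 */
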
static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}

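/*
 * Sketch of the encoding above with assumed example values: a 32-bit EMIF
 * bus built from x16 parts has sdram_io_width = 3 (log2(16) - 1) and
 * emif_bus_width = __fls(32) - 1 = 4, so TA_DEVCNT = 4 - 3 = 1 (two devices
 * per CS) and TA_DEVWDT = 3 - 2 = 1 (x16 device width).
 */
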
static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
{
	u32 idle = 0, val = 0;

	/*
	 * Maximum value in normal conditions and increased frequency
	 * when voltage is ramping
	 */
	if (volt_ramp)
		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
	else
		val = 0x1FF;

	/*
	 * READ_IDLE_CTRL register in EMIF4D has same offset and fields
	 * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
	 */
	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;

	return idle;
}

static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
{
	u32 calib = 0, val = 0;

	if (volt_ramp == DDR_VOLTAGE_RAMPING)
		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
	else
		val = 0; /* Disabled when voltage is stable */

	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;

	return calib;
}

static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
		u32 freq, u8 RL)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;

	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
	phy |= val << READ_LATENCY_SHIFT_4D;

	if (freq <= 100000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
	else if (freq <= 200000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
	else
		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;

	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;

	return phy;
}

static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;

	/*
	 * DLL operates at 266 MHz. If DDR frequency is near 266 MHz,
	 * half-delay is not needed else set half-delay
	 */
	if (freq >= 265000000 && freq < 267000000)
		half_delay = 0;
	else
		half_delay = 1;

	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
		t_ck) - 1) << READ_LATENCY_SHIFT_4D5);

	return phy;
}

static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
		fifo_we_slave_ratio << 22;
}

static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
}

static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
		fifo_we_slave_ratio << 13;
}

static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
	u32 mask;
	u8 shift;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode = cust_cfgs->lpmode;
		timeout_perf = cust_cfgs->lpmode_timeout_performance;
		timeout_pwr = cust_cfgs->lpmode_timeout_power;
		freq_threshold = cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/*
	 * The value to be set in register is "log2(timeout) - 3"
	 * if timeout < 16 load 0 in register
	 * if timeout is not a power of 2, round to next highest power of 2
	 */
	if (timeout < 16) {
		timeout = 0;
	} else {
		if (timeout & (timeout - 1))
			timeout <<= 1;
		timeout = __fls(timeout) - 3;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		shift = CS_TIM_SHIFT;
		mask = CS_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		shift = SR_TIM_SHIFT;
		mask = SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		shift = PD_TIM_SHIFT;
		mask = PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		mask = 0;
		shift = 0;
		break;
	}
	/* Round to maximum in case of overflow, BUT warn! */
	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
			lpmode, timeout_perf, timeout_pwr, freq_threshold);
		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
			timeout, mask >> shift);
		timeout = mask >> shift;
	}

	/* Setup required timing */
	pwr_mgmt_ctrl = (timeout << shift) & mask;
	/* setup a default mask for rest of the modes */
	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
			~mask;

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}

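/*
 * Example of the "log2(timeout) - 3" encoding above (illustrative numbers):
 * a requested timeout of 80 cycles is not a power of two, so the
 * "timeout <<= 1" step first doubles it to 160, and __fls(160) - 3 = 7 - 3
 * = 4 is the value written into the relevant REG_*_TIM field.
 */
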
/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached(one on each CS), then the temperature
 * level for the EMIF instance is the higher of the two temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal(3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * Program EMIF shadow registers that are not dependent on temperature
 * or voltage
 */
static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
{
	void __iomem	*base = emif->base;

	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
	writel(regs->pwr_mgmt_ctrl_shdw,
		base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);

	/* Settings specific for EMIF4D5 */
	if (emif->plat_data->ip_rev != EMIF_4D5)
		return;
	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
}

/*
 * When voltage ramps dll calibration and forced read idle should
 * happen more often
 */
static void setup_volt_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs, u32 volt_state)
{
	u32		calib_ctrl;
	void __iomem	*base = emif->base;

	/*
	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
	 * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_*
	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
	 * a union). So, the below code takes care of both cases
	 */
	if (volt_state == DDR_VOLTAGE_RAMPING)
		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
	else
		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;

	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * IF we detect higher than "nominal rating" from DDR sensor
	 * on an unsupported DDR part, shutdown system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s: NOT Extended temperature capable memory. "
				"Converting MR4=0x%02x as shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;
	u32			interrupts;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data *emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

		/* If we have Power OFF ability, use it, else try restarting */
		if (pm_power_off) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				emif_interrupt_handler,
				emif_threaded_isr,
				0, dev_name(emif->dev),
				emif);
}

static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

#if defined(CONFIG_OF)
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);

	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
				be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}

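/*
 * Illustrative device-tree snippet for the properties parsed above (node
 * name and values are made-up examples, not taken from a real board file):
 *
 *	emif1: emif@4c000000 {
 *		compatible = "ti,emif-4d5";
 *		low-power-mode = <2>;	// self-refresh
 *		low-power-mode-timeout-performance = <2048>;
 *		low-power-mode-timeout-power = <512>;
 *		low-power-mode-freq-threshold = <400000000>;
 *		temp-alert-poll-interval = <360>;
 *	};
 */
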
static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedc_ddr.h */
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to io_width encoding in jedc_ddr.h */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}

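/*
 * Example of the encodings above (illustrative values): a 512Mb, x16 part
 * gives density = __fls(512) - 5 = 9 - 5 = 4 and io_width = __fls(16) - 1 =
 * 4 - 1 = 3; any non-power-of-two value is treated as invalid (encoded as 0).
 */
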
static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info = NULL;
	struct emif_platform_data	*pd = NULL;
	struct device_node		*np_ddr;
	int				len;

	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
	if (!np_ddr)
		goto error;

	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s: Out of memory!!\n",
			__func__);
		goto error;
	}

	emif->plat_data		= pd;
	pd->device_info		= dev_info;
	emif->np_ddr		= np_ddr;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
		emif->plat_data->ip_rev = EMIF_4D;
	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
		emif->plat_data->ip_rev = EMIF_4D5;

	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);

	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;

	of_get_ddr_info(np_emif, np_ddr, dev_info);
	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
			dev)) {
		dev_err(dev, "%s: invalid device data!!\n", __func__);
		goto error;
	}
	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly same as on EMIF1(which is typically the case). If so,
	 * mark it as a duplicate of EMIF1. This will save some memory and
	 * some computation later.
	 */
	if (emif1 && emif1->np_ddr == np_ddr) {
		emif->duplicate = true;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	of_get_custom_configs(np_emif, emif);
	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
					emif->plat_data->device_info->type,
					&emif->plat_data->timings_arr_size);

	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);

out:
	return emif;

error:
	return NULL;
}

#else

static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	return NULL;
}

#endif

static struct emif_data *__init_or_module get_device_details(
		struct platform_device *pdev)
{
	u32				size;
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info;
	struct emif_custom_configs	*cust_cfgs;
	struct emif_platform_data	*pd;
	struct device			*dev;
	void				*temp;

	pd = pdev->dev.platform_data;
	dev = &pdev->dev;

	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
			pd->device_info->density, pd->device_info->io_width,
			pd->phy_type, pd->ip_rev, dev))) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}

	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
		goto error;
	}

	memcpy(temp, pd, sizeof(*pd));
	pd = temp;
	memcpy(dev_info, pd->device_info, sizeof(*dev_info));

	pd->device_info		= dev_info;
	emif->plat_data		= pd;
	emif->dev		= dev;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly same as on EMIF1(which is typically the case). If so,
	 * mark it as a duplicate of EMIF1 and skip copying timings data.
	 * This will save some memory and some computation later.
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		pd->timings = NULL;
		pd->min_tck = NULL;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;
	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		else
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If it is not
	 * available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;

error:
	return NULL;
}

static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	struct resource		*res;
	int			irq;

	if (pdev->dev.of_node)
		emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
	else
		emif = get_device_details(pdev);

	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);
	emif->addressing = get_addressing_table(emif->plat_data->device_info);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif->base = devm_ioremap_resource(emif->dev, res);
	if (IS_ERR(emif->base))
		goto error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
			__func__, irq);
		goto error;
	}

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	setup_interrupts(emif, irq);

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;
		spin_lock_init(&emif_lock);

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}

static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}

static int get_emif_reg_values(struct emif_data *emif, u32 freq,
		struct emif_regs *regs)
{
	u32				cs1_used, ip_rev, phy_type;
	u32				cl, type;
	const struct lpddr2_timings	*timings;
	const struct lpddr2_min_tck	*min_tck;
	const struct ddr_device_info	*device_info;
	const struct lpddr2_addressing	*addressing;
	struct emif_data		*emif_for_calc;
	struct device			*dev;
	const struct emif_custom_configs *custom_configs;

	dev = emif->dev;
	/*
	 * If the devices on this EMIF instance are a duplicate of EMIF1,
	 * use EMIF1 details for the calculation
	 */
	emif_for_calc	= emif->duplicate ? emif1 : emif;
	timings		= get_timings_table(emif_for_calc, freq);
	addressing	= emif_for_calc->addressing;
	if (!timings || !addressing) {
		dev_err(dev, "%s: not enough data available for %dHz",
			__func__, freq);
		return -1;
	}

	device_info	= emif_for_calc->plat_data->device_info;
	type		= device_info->type;
	cs1_used	= device_info->cs1_used;
	ip_rev		= emif_for_calc->plat_data->ip_rev;
	phy_type	= emif_for_calc->plat_data->phy_type;

	min_tck		= emif_for_calc->plat_data->min_tck;
	custom_configs	= emif_for_calc->plat_data->custom_configs;

	set_ddr_clk_period(freq);

	regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing);
	regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck,
			addressing);
	regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck,
			addressing, type);
	regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck,
			addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);

	cl = get_cl(emif);

	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
			timings, freq, cl);
	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
	} else {
		return -1;
	}

	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
	regs->pwr_mgmt_ctrl_shdw =
		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);

	if (ip_rev & EMIF_4D) {
		regs->read_idle_ctrl_shdw_normal =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->read_idle_ctrl_shdw_volt_ramp =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	} else if (ip_rev & EMIF_4D5) {
		regs->dll_calib_ctrl_shdw_normal =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->dll_calib_ctrl_shdw_volt_ramp =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
			addressing);

		regs->sdram_tim1_shdw_derated =
			get_sdram_tim_1_shdw_derated(timings, min_tck,
				addressing);

		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
			min_tck, addressing, type, ip_rev,
			EMIF_DERATED_TIMINGS);
	}

	regs->freq = freq;

	return 0;
}

/*
 * get_regs() - gets the cached emif_regs structure for a given EMIF instance
 * and a given frequency(freq):
 *
 * As an optimisation, every EMIF instance other than EMIF1 shares the
 * register cache with EMIF1 if the devices connected on this instance
 * are same as that on EMIF1(indicated by the duplicate flag)
 *
 * If we do not have an entry corresponding to the frequency given, we
 * allocate a new entry and calculate the values
 *
 * Upon finding the right reg dump, save it in curr_regs. It can be
 * directly used for thermal de-rating and voltage ramping changes.
 */
static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
{
	int			i;
	struct emif_regs	**regs_cache;
	struct emif_regs	*regs = NULL;
	struct device		*dev;

	dev = emif->dev;
	if (emif->curr_regs && emif->curr_regs->freq == freq) {
		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
		return emif->curr_regs;
	}

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		if (regs_cache[i]->freq == freq) {
			regs = regs_cache[i];
			dev_dbg(dev,
				"%s: reg dump found in reg cache for %u Hz\n",
				__func__, freq);
			break;
		}
	}

	/*
	 * If we don't have an entry for this frequency in the cache create one
	 * and calculate the values
	 */
	if (!regs) {
		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
		if (!regs)
			return NULL;

		if (get_emif_reg_values(emif, freq, regs)) {
			devm_kfree(emif->dev, regs);
			return NULL;
		}

		/*
		 * Now look for an un-used entry in the cache and save the
		 * newly created struct. If there are no free entries
		 * over-write the last entry
		 */
		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
			;

		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
				__func__);
			i = EMIF_MAX_NUM_FREQUENCIES - 1;
			devm_kfree(emif->dev, regs_cache[i]);
		}
		regs_cache[i] = regs;
	}

	return regs;
}

static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
{
	dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
		volt_state);

	if (!emif->curr_regs) {
		dev_err(emif->dev,
			"%s: volt-notify before registers are ready: %d\n",
			__func__, volt_state);
		return;
	}

	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
}

/*
 * TODO: voltage notify handling should be hooked up to
 * regulator framework as soon as the necessary support
 * is available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
{
	struct emif_data *emif;

	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_volt_notify_handling(emif, volt_state);
	do_freq_update();

	spin_unlock_irqrestore(&emif_lock, irq_state);
}

static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
{
	struct emif_regs *regs;

	regs = get_regs(emif, new_freq);
	if (!regs)
		return;

	emif->curr_regs = regs;

	/*
	 * Update the shadow registers:
	 * Temperature and voltage-ramp sensitive settings are also configured
	 * in terms of DDR cycles. So, we need to update them too when there
	 * is a freq change
	 */
	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
		__func__, new_freq);
	setup_registers(emif, regs);
	setup_temperature_sensitive_regs(emif, regs);
	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);

	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * above
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_DISABLE);
}

/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
{
	struct emif_data *emif;

	/*
	 * NOTE: we are taking the spin-lock here and release it
	 * only in the post-notifier. This doesn't look good and
	 * Sparse complains about it, but this seems to be
	 * un-avoidable. We need to lock a sequence of events
	 * that is split between EMIF and clock framework.
	 *
	 * 1. EMIF driver updates EMIF timings in shadow registers in the
	 *    frequency pre-notify callback from clock framework
	 * 2. clock framework sets up the registers for the new frequency
	 * 3. clock framework initiates a hw-sequence that updates
	 *    the frequency EMIF timings synchronously.
	 *
	 * All these 3 steps should be performed as an atomic operation
	 * vis-a-vis similar sequence in the EMIF interrupt handler
	 * for temperature events. Otherwise, there could be race
	 * conditions that could result in incorrect EMIF timings for
	 * a given frequency
	 */
	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_freq_pre_notify_handling(emif, new_freq);
}

static void do_freq_post_notify_handling(struct emif_data *emif)
{
	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * above
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
}

/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_post_notify_handling(void)
{
	struct emif_data *emif;

	list_for_each_entry(emif, &device_list, node)
		do_freq_post_notify_handling(emif);

	/*
	 * Lock is done in pre-notify handler. See freq_pre_notify_handling()
	 * for more info
	 */
	spin_unlock_irqrestore(&emif_lock, irq_state);
}

#if defined(CONFIG_OF)
static const struct of_device_id emif_of_match[] = {
	{ .compatible = "ti,emif-4d" },
	{ .compatible = "ti,emif-4d5" },
	{},
};
MODULE_DEVICE_TABLE(of, emif_of_match);
#endif

static struct platform_driver emif_driver = {
	.remove		= __exit_p(emif_remove),
	.shutdown	= emif_shutdown,
	.driver = {
		.name		= "emif",
		.of_match_table	= of_match_ptr(emif_of_match),
	},
};

module_platform_driver_probe(emif_driver, emif_probe);

MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");