/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <aneesh@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <memory/jedec_ddr.h>

#include "emif.h"

/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:		Whether the DDR devices attached to this EMIF
 *			instance are exactly same as that on EMIF1. In
 *			this case we can save some memory and processing
 * @temperature_level:	Maximum temperature of LPDDR2 devices attached
 *			to this EMIF - read from MR4 register. If there
 *			are two devices attached to this EMIF, this
 *			value is the maximum of the two temperature
 *			levels.
 * @node:		node in the device list
 * @base:		base address of memory-mapped IO registers.
 * @dev:		device pointer.
 * @addressing:		table with addressing information from the spec
 * @regs_cache:		An array of 'struct emif_regs' that stores
 *			calculated register values for different
 *			frequencies, to avoid re-calculating them on
 *			each DVFS transition.
 * @curr_regs:		The set of register values used in the last
 *			frequency change (i.e. corresponding to the
 *			frequency in effect at the moment)
 * @plat_data:		Pointer to saved platform data.
 * @debugfs_root:	dentry to the root folder for EMIF in debugfs
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	const struct lpddr2_addressing	*addressing;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
};

static struct emif_data *emif1;
static spinlock_t	emif_lock;
static unsigned long	irq_state;
static u32		t_ck; /* DDR clock period in ps */
static LIST_HEAD(device_list);

static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
	struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}

static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif = s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
		seq_printf(s, "\n");
	}

	return 0;
}

static int emif_regdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_regdump_show, inode->i_private);
}

static const struct file_operations emif_regdump_fops = {
	.open		= emif_regdump_open,
	.read		= seq_read,
	.release	= single_release,
};

static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

static int emif_mr4_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_mr4_show, inode->i_private);
}

static const struct file_operations emif_mr4_fops = {
	.open		= emif_mr4_open,
	.read		= seq_read,
	.release	= single_release,
};

static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	struct dentry	*dentry;
	int		ret;

	dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
	if (IS_ERR(dentry)) {
		ret = PTR_ERR(dentry);
		goto err0;
	}
	emif->debugfs_root = dentry;

	dentry = debugfs_create_file("regcache_dump", S_IRUGO,
			emif->debugfs_root, emif, &emif_regdump_fops);
	if (IS_ERR(dentry)) {
		ret = PTR_ERR(dentry);
		goto err1;
	}

	dentry = debugfs_create_file("mr4", S_IRUGO,
			emif->debugfs_root, emif, &emif_mr4_fops);
	if (IS_ERR(dentry)) {
		ret = PTR_ERR(dentry);
		goto err1;
	}

	return 0;
err1:
	debugfs_remove_recursive(emif->debugfs_root);
err0:
	return ret;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}

/*
 * Calculate the period of DDR clock from frequency value
 */
static void set_ddr_clk_period(u32 freq)
{
	/* Divide 10^12 by frequency to get period in ps */
	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
}

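/*
 * Worked example (illustrative, not from the original source): at a DDR
 * clock of 400 MHz, set_ddr_clk_period() yields
 * t_ck = DIV_ROUND_UP(10^12, 400000000) = 2500 ps, i.e. a 2.5 ns cycle,
 * which all the cycle-count conversions below divide against.
 */
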
/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
	width = width == 0 ? 32 : 16;

	return width;
}

/*
 * Get the CL from SDRAM_CONFIG register
 */
static u32 get_cl(struct emif_data *emif)
{
	u32		cl;
	void __iomem	*base = emif->base;

	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;

	return cl;
}

static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}

static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And frequency change is requested
	 * - And OCP access is requested
	 * then it causes an unstable clock on the DDR interface.
	 *
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework.
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}

/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32 index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}

/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
		u32 freq)
{
	u32				i, min, max, freq_nearest;
	const struct lpddr2_timings	*timings = NULL;
	const struct lpddr2_timings	*timings_arr = emif->plat_data->timings;
	struct device			*dev = emif->dev;

	/* Start with a very high frequency - 1GHz */
	freq_nearest = 1000000000;

	/*
	 * Find the timings table such that:
	 *  1. the frequency range covers the required frequency(safe) AND
	 *  2. the max_freq is closest to the required frequency(optimal)
	 */
	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
		max = timings_arr[i].max_freq;
		min = timings_arr[i].min_freq;
		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
			freq_nearest = max;
			timings = &timings_arr[i];
		}
	}

	if (!timings)
		dev_err(dev, "%s: couldn't find timings for - %dHz\n",
			__func__, freq);

	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
		__func__, freq, freq_nearest);

	return timings;
}

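/*
 * Selection example (illustrative values, not from the original source):
 * with timing tables whose max_freq values are 200 MHz and 400 MHz, a
 * request for 300 MHz skips the 200 MHz entry (it does not cover 300 MHz)
 * and picks the 400 MHz entry - the covering table with the smallest
 * max_freq, and hence the tightest usable timings.
 */
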
static u32 get_sdram_ref_ctrl_shdw(u32 freq,
		const struct lpddr2_addressing *addressing)
{
	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;

	/* Scale down frequency and t_refi to avoid overflow */
	freq_khz = freq / 1000;
	t_refi = addressing->tREFI_ns / 100;

	/*
	 * refresh rate to be set is 'tREFI(in us) * freq in MHz'
	 * division by 10000 to account for change in units
	 */
	val = t_refi * freq_khz / 10000;
	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;

	return ref_ctrl_shdw;
}

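/*
 * Worked numbers (illustrative, assuming a typical LPDDR2 tREFI of 7.8 us
 * and a 400 MHz DDR clock): freq_khz = 400000 and t_refi = 7800 / 100 = 78,
 * so val = 78 * 400000 / 10000 = 3120, i.e. one auto-refresh roughly every
 * 3120 DDR clock cycles - the same result as tREFI(us) * freq(MHz).
 */
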
static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 |= val << T_WTR_SHIFT;

	if (addressing->num_banks == B8)
		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
	else
		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
	tim1 |= (val - 1) << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
	tim1 |= val << T_RC_SHIFT;

	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
	tim1 |= (val - 1) << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
	tim1 |= val << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
	tim1 |= val << T_RP_SHIFT;

	return tim1;
}

static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 = val << T_WTR_SHIFT;

	/*
	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
	 * to tFAW for de-rating
	 */
	if (addressing->num_banks == B8) {
		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
	} else {
		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
		val = max(min_tck->tRRD, val) - 1;
	}
	tim1 |= val << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
	tim1 |= (val - 1) << T_RC_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
	val = max(min_tck->tRASmin, val) - 1;
	tim1 |= val << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
	tim1 |= (val - 1) << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
	tim1 |= (val - 1) << T_RP_SHIFT;

	return tim1;
}

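/*
 * De-rating example (illustrative numbers): the code above adds 1875 ps to
 * the core timings when de-rating. With tRRD = 10 ns and t_ck = 2500 ps the
 * nominal count is DIV_ROUND_UP(10000, 2500) = 4 cycles, while the derated
 * count becomes DIV_ROUND_UP(10000 + 1875, 2500) = 5 cycles.
 */
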
static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type)
{
	u32 tim2 = 0, val = 0;

	val = min_tck->tCKE - 1;
	tim2 |= val << T_CKE_SHIFT;

	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
	tim2 |= val << T_RTP_SHIFT;

	/* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */
	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
	tim2 |= val << T_XSNR_SHIFT;

	/* XSRD same as XSNR for LPDDR2 */
	tim2 |= val << T_XSRD_SHIFT;

	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
	tim2 |= val << T_XP_SHIFT;

	return tim2;
}

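/*
 * tXSNR example (illustrative, assuming a tRFCab of 130 ns for the attached
 * density and t_ck = 2500 ps): the exit-self-refresh delay programmed above
 * is DIV_ROUND_UP(130000 + 10000, 2500) - 1 = 56 - 1 = 55 cycles.
 */
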
static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type, u32 ip_rev, u32 derated)
{
	u32 tim3 = 0, val = 0, t_dqsck;

	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
	val = val > 0xF ? 0xF : val;
	tim3 |= val << T_RAS_MAX_SHIFT;

	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
	tim3 |= val << T_RFC_SHIFT;

	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
		timings->tDQSCK_max_derated : timings->tDQSCK_max;
	if (ip_rev == EMIF_4D5)
		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
	else
		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;

	tim3 |= val << T_TDQSCKMAX_SHIFT;

	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
	tim3 |= val << ZQ_ZQCS_SHIFT;

	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
	val = max(min_tck->tCKESR, val) - 1;
	tim3 |= val << T_CKESR_SHIFT;

	if (ip_rev == EMIF_4D5) {
		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;

		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
		tim3 |= val << T_PDLL_UL_SHIFT;
	}

	return tim3;
}

static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;

	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}

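/*
 * ZQCS interval example (illustrative, assuming a 50 ms calibration
 * interval and tREFI = 7800 ns): the REFINTERVAL field above would hold
 * 50000 * 1000 / 7800 = 6410, i.e. one short ZQ calibration roughly every
 * 6410 refresh periods.
 */
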
static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}

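/*
 * DEVCNT example (illustrative): with a 32-bit EMIF bus and x16 SDRAM parts,
 * __fls(32) - 1 = 4 and the x16 io-width code (log2(16) - 1) is 3, so
 * TA_DEVCNT = 4 - 3 = 1, i.e. log2(2) for the two x16 devices sharing the bus.
 */
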
static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
{
	u32 idle = 0, val = 0;

	/*
	 * Maximum value in normal conditions and increased frequency
	 * when voltage is ramping
	 */
	if (volt_ramp == DDR_VOLTAGE_RAMPING)
		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
	else
		val = 0x1FF;

	/*
	 * READ_IDLE_CTRL register in EMIF4D has same offset and fields
	 * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
	 */
	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;

	return idle;
}

static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
{
	u32 calib = 0, val = 0;

	if (volt_ramp == DDR_VOLTAGE_RAMPING)
		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
	else
		val = 0; /* Disabled when voltage is stable */

	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;

	return calib;
}

static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
		u32 freq, u8 RL)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;

	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
	phy |= val << READ_LATENCY_SHIFT_4D;

	if (freq <= 100000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
	else if (freq <= 200000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
	else
		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;

	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;

	return phy;
}

static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;

	/*
	 * DLL operates at 266 MHz. If DDR frequency is near 266 MHz,
	 * half-delay is not needed else set half-delay
	 */
	if (freq >= 265000000 && freq < 267000000)
		half_delay = 0;
	else
		half_delay = 1;

	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
			t_ck) - 1) << READ_LATENCY_SHIFT_4D5);

	return phy;
}

static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
		fifo_we_slave_ratio << 22;
}

static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
}

static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
		fifo_we_slave_ratio << 13;
}

static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode		= cust_cfgs->lpmode;
		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
		freq_threshold	= cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/* The value to be set in register is "log2(timeout) - 3" */
	if (timeout < 16) {
		timeout = 0;
	} else {
		timeout = __fls(timeout) - 3;
		if (timeout & (timeout - 1))
			timeout++;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		pwr_mgmt_ctrl = (timeout << CS_TIM_SHIFT) |
					SR_TIM_MASK | PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		pwr_mgmt_ctrl = (timeout << SR_TIM_SHIFT) |
					CS_TIM_MASK | PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		pwr_mgmt_ctrl = (timeout << PD_TIM_SHIFT) |
					CS_TIM_MASK | SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		pwr_mgmt_ctrl = CS_TIM_MASK |
					PD_TIM_MASK | SR_TIM_MASK;
	}

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}

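/*
 * Timeout encoding example (illustrative): a requested timeout of 512 DDR
 * cycles is a power of two, so the "log2(timeout) - 3" field above is simply
 * __fls(512) - 3 = 9 - 3 = 6.
 */
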
/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached(one on each CS), then the temperature
 * level for the EMIF instance is the higher of the two temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal(3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * Program EMIF shadow registers that are not dependent on temperature
 * or voltage
 */
static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
{
	void __iomem	*base = emif->base;

	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);

	/* Settings specific for EMIF4D5 */
	if (emif->plat_data->ip_rev != EMIF_4D5)
		return;
	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
}

/*
 * When voltage ramps, dll calibration and forced read idle should
 * happen more often.
 */
static void setup_volt_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs, u32 volt_state)
{
	u32		calib_ctrl;
	void __iomem	*base = emif->base;

	/*
	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
	 * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_*
	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
	 * a union). So, the below code takes care of both cases
	 */
	if (volt_state == DDR_VOLTAGE_RAMPING)
		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
	else
		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;

	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;
	u32			interrupts;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data *emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
		kernel_power_off();
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				emif_interrupt_handler,
				emif_threaded_isr,
				0, dev_name(emif->dev),
				emif);
}

static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

static struct emif_data *__init_or_module get_device_details(
		struct platform_device *pdev)
{
	u32				size;
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info;
	struct emif_custom_configs	*cust_cfgs;
	struct emif_platform_data	*pd;
	struct device			*dev = &pdev->dev;
	void				*temp;

	pd = pdev->dev.platform_data;

	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
			pd->device_info->density, pd->device_info->io_width,
			pd->phy_type, pd->ip_rev, dev))) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}

	emif	 = devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
	temp	 = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
		goto error;
	}

	memcpy(temp, pd, sizeof(*pd));
	pd = temp;
	memcpy(dev_info, pd->device_info, sizeof(*dev_info));

	pd->device_info		= dev_info;
	emif->plat_data		= pd;
	emif->dev		= dev;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly same as on EMIF1(which is typically the case). If so,
	 * mark it as a duplicate of EMIF1 and skip copying timings data.
	 * This will save some memory and some computation later.
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		/* One needn't copy timings data for duplicate devices */
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;
	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		else
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If it is not
	 * available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;

error:
	return NULL;
}

static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	struct resource		*res;
	int			irq;

	emif = get_device_details(pdev);
	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);
	emif->addressing = get_addressing_table(emif->plat_data->device_info);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(emif->dev, "%s: error getting memory resource\n",
			__func__);
		goto error;
	}

	emif->base = devm_request_and_ioremap(emif->dev, res);
	if (!emif->base) {
		dev_err(emif->dev, "%s: devm_request_and_ioremap() failed\n",
			__func__);
		goto error;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
			__func__, irq);
		goto error;
	}

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	setup_interrupts(emif, irq);

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;
		spin_lock_init(&emif_lock);

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}

static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}

static int get_emif_reg_values(struct emif_data *emif, u32 freq,
		struct emif_regs *regs)
{
	u32				cs1_used, ip_rev, phy_type;
	u32				cl, type;
	const struct lpddr2_timings	*timings;
	const struct lpddr2_min_tck	*min_tck;
	const struct ddr_device_info	*device_info;
	const struct lpddr2_addressing	*addressing;
	struct emif_data		*emif_for_calc;
	struct device			*dev = emif->dev;
	const struct emif_custom_configs *custom_configs;

	/*
	 * If the devices on this EMIF instance is duplicate of EMIF1,
	 * use EMIF1 details for the calculation
	 */
	emif_for_calc	= emif->duplicate ? emif1 : emif;
	timings		= get_timings_table(emif_for_calc, freq);
	addressing	= emif_for_calc->addressing;
	if (!timings || !addressing) {
		dev_err(dev, "%s: not enough data available for %dHz",
			__func__, freq);
		return -1;
	}

	device_info	= emif_for_calc->plat_data->device_info;
	type		= device_info->type;
	cs1_used	= device_info->cs1_used;
	ip_rev		= emif_for_calc->plat_data->ip_rev;
	phy_type	= emif_for_calc->plat_data->phy_type;

	min_tck		= emif_for_calc->plat_data->min_tck;
	custom_configs	= emif_for_calc->plat_data->custom_configs;

	set_ddr_clk_period(freq);

	regs->ref_ctrl_shdw	= get_sdram_ref_ctrl_shdw(freq, addressing);
	regs->sdram_tim1_shdw	= get_sdram_tim_1_shdw(timings, min_tck,
					addressing);
	regs->sdram_tim2_shdw	= get_sdram_tim_2_shdw(timings, min_tck,
					addressing, type);
	regs->sdram_tim3_shdw	= get_sdram_tim_3_shdw(timings, min_tck,
					addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);

	cl = get_cl(emif);

	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
			timings, freq, cl);
	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
	} else {
		return -1;
	}

	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
	regs->pwr_mgmt_ctrl_shdw =
		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);

	if (ip_rev & EMIF_4D) {
		regs->read_idle_ctrl_shdw_normal =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->read_idle_ctrl_shdw_volt_ramp =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	} else if (ip_rev & EMIF_4D5) {
		regs->dll_calib_ctrl_shdw_normal =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->dll_calib_ctrl_shdw_volt_ramp =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
			addressing);

		regs->sdram_tim1_shdw_derated =
			get_sdram_tim_1_shdw_derated(timings, min_tck,
				addressing);

		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
			min_tck, addressing, type, ip_rev,
			EMIF_DERATED_TIMINGS);
	}

	regs->freq = freq;

	return 0;
}

/**
 * get_regs() - gets the cached emif_regs structure for a given EMIF instance
 * given frequency(freq):
 *
 * As an optimisation, every EMIF instance other than EMIF1 shares the
 * register cache with EMIF1 if the devices connected on this instance
 * are same as that on EMIF1(indicated by the duplicate flag)
 *
 * If we do not have an entry corresponding to the frequency given, we
 * allocate a new entry and calculate the values
 *
 * Upon finding the right reg dump, save it in curr_regs. It can be
 * directly used for thermal de-rating and voltage ramping changes.
 */
static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
{
	u32			i;
	struct emif_regs	**regs_cache;
	struct emif_regs	*regs = NULL;
	struct device		*dev = emif->dev;

	if (emif->curr_regs && emif->curr_regs->freq == freq) {
		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
		return emif->curr_regs;
	}

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		if (regs_cache[i]->freq == freq) {
			regs = regs_cache[i];
			dev_dbg(dev,
				"%s: reg dump found in reg cache for %u Hz\n",
				__func__, freq);
			break;
		}
	}

	/*
	 * If we don't have an entry for this frequency in the cache create one
	 * and calculate the values
	 */
	if (!regs) {
		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
		if (!regs)
			return NULL;

		if (get_emif_reg_values(emif, freq, regs)) {
			devm_kfree(emif->dev, regs);
			return NULL;
		}

		/*
		 * Now look for an un-used entry in the cache and save the
		 * newly created struct. If there are no free entries
		 * over-write the last entry
		 */
		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
			;

		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
				__func__);
			i = EMIF_MAX_NUM_FREQUENCIES - 1;
			devm_kfree(emif->dev, regs_cache[i]);
		}
		regs_cache[i] = regs;
	}

	return regs;
}

static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
{
	dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
		volt_state);

	if (!emif->curr_regs) {
		dev_err(emif->dev,
			"%s: volt-notify before registers are ready: %d\n",
			__func__, volt_state);
		return;
	}

	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
}

/*
 * TODO: voltage notify handling should be hooked up to
 * regulator framework as soon as the necessary support
 * is available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
{
	struct emif_data *emif;

	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_volt_notify_handling(emif, volt_state);
	do_freq_update();

	spin_unlock_irqrestore(&emif_lock, irq_state);
}

static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
{
	struct emif_regs *regs;

	regs = get_regs(emif, new_freq);
	if (!regs)
		return;

	emif->curr_regs = regs;

	/*
	 * Update the shadow registers:
	 * Temperature and voltage-ramp sensitive settings are also configured
	 * in terms of DDR cycles. So, we need to update them too when there
	 * is a freq change
	 */
	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
		__func__, new_freq);
	setup_registers(emif, regs);
	setup_temperature_sensitive_regs(emif, regs);
	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);

	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * above for more details
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_DISABLE);
}

/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
{
	struct emif_data *emif;

	/*
	 * NOTE: we take the spin-lock here and release it
	 * only in the post-notifier. This doesn't look good and
	 * Sparse complains about it, but this seems to be
	 * un-avoidable. We need to lock a sequence of events
	 * that is split between EMIF and clock framework.
	 *
	 * 1. EMIF driver updates EMIF timings in shadow registers in the
	 *    frequency pre-notify callback from clock framework
	 * 2. clock framework sets up the registers for the new frequency
	 * 3. clock framework initiates a hw-sequence that updates
	 *    the frequency EMIF timings synchronously.
	 *
	 * All these 3 steps should be performed as an atomic operation
	 * vis-a-vis similar sequence in the EMIF interrupt handler
	 * for temperature events. Otherwise, there could be race
	 * conditions that could result in incorrect EMIF timings for
	 * a given frequency.
	 */
	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_freq_pre_notify_handling(emif, new_freq);
}

static void do_freq_post_notify_handling(struct emif_data *emif)
{
	/*
	 * Part of workaround for errata i728. See do_freq_update()
	 * above for more details
	 */
	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
		set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
}

/*
 * TODO: frequency notify handling should be hooked up to
 * clock framework as soon as the necessary support is
 * available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) freq_post_notify_handling(void)
{
	struct emif_data *emif;

	list_for_each_entry(emif, &device_list, node)
		do_freq_post_notify_handling(emif);

	/*
	 * Lock is done in pre-notify handler. See freq_pre_notify_handling()
	 * for more details
	 */
	spin_unlock_irqrestore(&emif_lock, irq_state);
}

static struct platform_driver emif_driver = {
	.remove		= __exit_p(emif_remove),
	.shutdown	= emif_shutdown,
	.driver = {
		.name = "emif",
	},
};

static int __init_or_module emif_register(void)
{
	return platform_driver_probe(&emif_driver, emif_probe);
}

static void __exit emif_unregister(void)
{
	platform_driver_unregister(&emif_driver);
}

module_init(emif_register);
module_exit(emif_unregister);
MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");