drivers/memory/emif.c
/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <aneesh@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <memory/jedec_ddr.h>
#include "emif.h"
#include "of_memory.h"

/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:			Whether the DDR devices attached to this EMIF
 *				instance are exactly the same as those on
 *				EMIF1. In this case we can save some memory
 *				and processing
 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
 *				to this EMIF - read from MR4 register. If there
 *				are two devices attached to this EMIF, this
 *				value is the maximum of the two temperature
 *				levels.
 * @lpmode:			Low power mode currently programmed into
 *				EMIF_PWR_MGMT_CTRL
 * @node:			node in the device list
 * @irq_state:			saved IRQ state flags
 * @base:			base address of memory-mapped IO registers.
 * @dev:			device pointer.
 * @addressing:			table with addressing information from the spec
 * @regs_cache:			An array of 'struct emif_regs' that stores
 *				calculated register values for different
 *				frequencies, to avoid re-calculating them on
 *				each DVFS transition.
 * @curr_regs:			The set of register values used in the last
 *				frequency change (i.e. corresponding to the
 *				frequency in effect at the moment)
 * @plat_data:			Pointer to saved platform data.
 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
 * @np_ddr:			Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	const struct lpddr2_addressing	*addressing;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};

static struct emif_data *emif1;
static spinlock_t	emif_lock;
static unsigned long	irq_state;
static u32		t_ck; /* DDR clock period in ps */
static LIST_HEAD(device_list);

#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
	struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}

static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif	= s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
		seq_putc(s, '\n');
	}

	return 0;
}

static int emif_regdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_regdump_show, inode->i_private);
}

static const struct file_operations emif_regdump_fops = {
	.open			= emif_regdump_open,
	.read			= seq_read,
	.release		= single_release,
};

static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

static int emif_mr4_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_mr4_show, inode->i_private);
}

static const struct file_operations emif_mr4_fops = {
	.open			= emif_mr4_open,
	.read			= seq_read,
	.release		= single_release,
};

static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	struct dentry	*dentry;
	int		ret;

	dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
	if (!dentry) {
		ret = -ENOMEM;
		goto err0;
	}
	emif->debugfs_root = dentry;

	dentry = debugfs_create_file("regcache_dump", S_IRUGO,
			emif->debugfs_root, emif, &emif_regdump_fops);
	if (!dentry) {
		ret = -ENOMEM;
		goto err1;
	}

	dentry = debugfs_create_file("mr4", S_IRUGO,
			emif->debugfs_root, emif, &emif_mr4_fops);
	if (!dentry) {
		ret = -ENOMEM;
		goto err1;
	}

	return 0;
err1:
	debugfs_remove_recursive(emif->debugfs_root);
err0:
	return ret;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}
#else
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif

/*
 * Calculate the period of DDR clock from frequency value
 */
static void set_ddr_clk_period(u32 freq)
{
	/* Divide 10^12 by frequency to get period in ps */
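	/* For example, freq = 400000000 (400 MHz) gives t_ck = 2500 ps */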
	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
}

/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case the bus width as
 * far as EMIF is concerned is 32 bits, whereas the DDR bus width is 16
 * bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
	width = width == 0 ? 32 : 16;

	return width;
}

/*
 * Get the CL from SDRAM_CONFIG register
 */
static u32 get_cl(struct emif_data *emif)
{
	u32		cl;
	void __iomem	*base = emif->base;

	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;

	return cl;
}

static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is to not allow power-down state and, therefore, to not set the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
		WARN_ONCE(1,
			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743; switching to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}

static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And a frequency change is requested
	 * - And an OCP access is requested
	 * then the clock on the DDR interface becomes unstable.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}

/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32		index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
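			/*
			 * S2 parts of 1 Gb / 2 Gb density have dedicated
			 * entries placed after the S4 entries in
			 * lpddr2_jedec_addressing_table, hence the +3.
			 */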
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}

/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
		u32 freq)
{
	u32				i, min, max, freq_nearest;
	const struct lpddr2_timings	*timings = NULL;
	const struct lpddr2_timings	*timings_arr = emif->plat_data->timings;
	struct device			*dev = emif->dev;

	/* Start with a very high frequency - 1GHz */
	freq_nearest = 1000000000;

	/*
	 * Find the timings table such that:
	 *  1. the frequency range covers the required frequency (safe) AND
	 *  2. the max_freq is closest to the required frequency (optimal)
	 */
	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
		max = timings_arr[i].max_freq;
		min = timings_arr[i].min_freq;
		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
			freq_nearest = max;
			timings = &timings_arr[i];
		}
	}

	if (!timings)
		dev_err(dev, "%s: couldn't find timings for - %d Hz\n",
			__func__, freq);

	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
		__func__, freq, freq_nearest);

	return timings;
}

static u32 get_sdram_ref_ctrl_shdw(u32 freq,
		const struct lpddr2_addressing *addressing)
{
	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;

	/* Scale down frequency and t_refi to avoid overflow */
	freq_khz = freq / 1000;
	t_refi = addressing->tREFI_ns / 100;

	/*
	 * The refresh rate to be set is 'tREFI (in us) * freq (in MHz)';
	 * the division by 10000 accounts for the change in units.
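	 * For example, with tREFI = 7800 ns and freq = 400 MHz this gives
	 * val = 78 * 400000 / 10000 = 3120 DDR cycles per refresh interval.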
	 */
	val = t_refi * freq_khz / 10000;
	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;

	return ref_ctrl_shdw;
}

static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 |= val << T_WTR_SHIFT;

	if (addressing->num_banks == B8)
		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
	else
		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
	tim1 |= (val - 1) << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
	tim1 |= val << T_RC_SHIFT;

	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
	tim1 |= (val - 1) << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
	tim1 |= val << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
	tim1 |= val << T_RP_SHIFT;

	return tim1;
}

static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
	tim1 = val << T_WTR_SHIFT;

	/*
	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
	 * to tFAW for de-rating
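	 * (1875 ps is the JEDEC de-rating adder for LPDDR2 core timings
	 * such as tRRD/tRCD/tRP at elevated temperature, applied throughout
	 * this function.)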
	 */
	if (addressing->num_banks == B8) {
		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
	} else {
		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
		val = max(min_tck->tRRD, val) - 1;
	}
	tim1 |= val << T_RRD_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
	tim1 |= (val - 1) << T_RC_SHIFT;

	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
	val = max(min_tck->tRASmin, val) - 1;
	tim1 |= val << T_RAS_SHIFT;

	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
	tim1 |= val << T_WR_SHIFT;

	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
	tim1 |= (val - 1) << T_RCD_SHIFT;

	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
	tim1 |= (val - 1) << T_RP_SHIFT;

	return tim1;
}

static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type)
{
	u32 tim2 = 0, val = 0;

	val = min_tck->tCKE - 1;
	tim2 |= val << T_CKE_SHIFT;

	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
	tim2 |= val << T_RTP_SHIFT;

	/* tXSNR = tRFCab + 10 ns (tRFCab_ps holds tRFCab for LPDDR2) */
	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
	tim2 |= val << T_XSNR_SHIFT;

	/* tXSRD is the same as tXSNR for LPDDR2 */
	tim2 |= val << T_XSRD_SHIFT;

	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
	tim2 |= val << T_XP_SHIFT;

	return tim2;
}

static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
		const struct lpddr2_min_tck *min_tck,
		const struct lpddr2_addressing *addressing,
		u32 type, u32 ip_rev, u32 derated)
{
	u32 tim3 = 0, val = 0, t_dqsck;

	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
	val = val > 0xF ? 0xF : val;
	tim3 |= val << T_RAS_MAX_SHIFT;

	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
	tim3 |= val << T_RFC_SHIFT;

	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
		timings->tDQSCK_max_derated : timings->tDQSCK_max;
	if (ip_rev == EMIF_4D5)
		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
	else
		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;

	tim3 |= val << T_TDQSCKMAX_SHIFT;

	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
	tim3 |= val << ZQ_ZQCS_SHIFT;

	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
	val = max(min_tck->tCKESR, val) - 1;
	tim3 |= val << T_CKESR_SHIFT;

	if (ip_rev == EMIF_4D5) {
		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;

		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
		tim3 |= val << T_PDLL_UL_SHIFT;
	}

	return tim3;
}

static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;

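	/*
	 * The ZQCS interval is programmed in units of refresh periods:
	 * EMIF_ZQCS_INTERVAL_US * 1000 converts to ns, and dividing by
	 * tREFI in ns gives the number of refresh intervals.
	 */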
	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}

static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
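	/*
	 * e.g. a 360 ms poll interval with tREFI = 7800 ns works out to
	 * 360000000 / 7800 = ~46153 refresh cycles
	 */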
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
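	 * For example, a 32-bit EMIF bus of x16 parts: __fls(32) - 1 = 4,
	 * sdram_io_width (x16) = 3, so devcnt = 1, i.e. log2(2 devices).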
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}

static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
{
	u32 idle = 0, val = 0;

	/*
	 * Use the maximum interval under normal conditions and a shorter
	 * interval (more frequent read idle windows) while voltage is
	 * ramping
	 */
	if (volt_ramp)
		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
	else
		val = 0x1FF;

	/*
	 * READ_IDLE_CTRL register in EMIF4D has same offset and fields
	 * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
	 */
	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;

	return idle;
}

static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
{
	u32 calib = 0, val = 0;

	if (volt_ramp == DDR_VOLTAGE_RAMPING)
		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
	else
		val = 0; /* Disabled when voltage is stable */

	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;

	return calib;
}

static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
	u32 freq, u8 RL)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;

	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
	phy |= val << READ_LATENCY_SHIFT_4D;

	if (freq <= 100000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
	else if (freq <= 200000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
	else
		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;

	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;

	return phy;
}

static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
{
	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;

	/*
	 * The DLL operates at 266 MHz. If the DDR frequency is near
	 * 266 MHz, half-delay is not needed; otherwise enable half-delay
	 */
	if (freq >= 265000000 && freq < 267000000)
		half_delay = 0;
	else
		half_delay = 1;

	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
			t_ck) - 1) << READ_LATENCY_SHIFT_4D5);

	return phy;
}

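/*
 * The helpers below compute the FIFO write-enable slave ratio (the DQS
 * gate opening delay as a fraction of t_ck, scaled by 256) and, judging
 * by the shifts used, replicate it into the per-byte-lane fields of
 * EXT_PHY_CTRL_2..4.
 */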
static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
		fifo_we_slave_ratio << 22;
}

static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
}

static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
{
	u32 fifo_we_slave_ratio;

	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);

	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
		fifo_we_slave_ratio << 13;
}

static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
	u32 mask;
	u8 shift;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode		= cust_cfgs->lpmode;
		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
		freq_threshold	= cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/*
	 * The value to be set in register is "log2(timeout) - 3"
	 * if timeout < 16 load 0 in register
	 * if timeout is not a power of 2, round to next highest power of 2
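	 * For example, timeout = 80 is not a power of 2, so it becomes 160;
	 * __fls(160) - 3 = 7 - 3 = 4, i.e. the register encodes 128 cycles.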
	 */
	if (timeout < 16) {
		timeout = 0;
	} else {
		if (timeout & (timeout - 1))
			timeout <<= 1;
		timeout = __fls(timeout) - 3;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		shift = CS_TIM_SHIFT;
		mask = CS_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		shift = SR_TIM_SHIFT;
		mask = SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		shift = PD_TIM_SHIFT;
		mask = PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		mask = 0;
		shift = 0;
		break;
	}
	/* Round to maximum in case of overflow, BUT warn! */
	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
		       lpmode,
		       timeout_perf,
		       timeout_pwr,
		       freq_threshold);
		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
		     timeout, mask >> shift);
		timeout = mask >> shift;
	}

	/* Setup required timing */
	pwr_mgmt_ctrl = (timeout << shift) & mask;
	/* Setup a default mask for the rest of the modes */
	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
			  ~mask;

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}

/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached (one on each CS), then the
 * temperature level for the EMIF instance is the higher of the two
 * temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal (3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get a reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * Program EMIF shadow registers that are not dependent on temperature
 * or voltage
 */
static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
{
	void __iomem	*base = emif->base;

	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
	writel(regs->pwr_mgmt_ctrl_shdw,
	       base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);

	/* Settings specific for EMIF4D5 */
	if (emif->plat_data->ip_rev != EMIF_4D5)
		return;
	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
}

/*
 * When the voltage ramps, DLL calibration and forced read idle should
 * happen more often
 */
static void setup_volt_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs, u32 volt_state)
{
	u32		calib_ctrl;
	void __iomem	*base = emif->base;

	/*
	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
	 * EMIF_DLL_CALIB_CTRL in EMIF4D5, and dll_calib_ctrl_shdw_*
	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
	 * a union). So the code below takes care of both cases
	 */
	if (volt_state == DDR_VOLTAGE_RAMPING)
		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
	else
		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;

	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-LPDDR2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * If we detect higher than "nominal rating" from the DDR sensor
	 * on an unsupported DDR part, shut down the system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s: NOT extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	u32			interrupts;
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

		/* If we have Power OFF ability, use it, else try restarting */
		if (pm_power_off) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem		*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				    emif_interrupt_handler,
				    emif_threaded_isr,
				    0, dev_name(emif->dev),
				    emif);
}

static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

#if defined(CONFIG_OF)
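/*
 * Illustrative device-tree fragment for the properties parsed by
 * of_get_custom_configs() below (node name and values are hypothetical
 * examples, not dictated by this driver):
 *
 *	emif1: emif@0 {
 *		low-power-mode = <2>;
 *		low-power-mode-timeout-performance = <512>;
 *		low-power-mode-timeout-power = <2048>;
 *		low-power-mode-freq-threshold = <400000000>;
 *		temp-alert-poll-interval = <360>;
 *		extended-temp-part;
 *	};
 */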
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);

	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
						be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}

static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
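	/* e.g. density = 2048 (2 Gb): __fls(2048) - 5 = 6 (DDR_DENSITY_2Gb) */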
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to io_width encoding in jedec_ddr.h */
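	/* e.g. io_width = 16: __fls(16) - 1 = 3 (DDR_IO_WIDTH_16) */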
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}

1344 static struct emif_data * __init_or_module of_get_memory_device_details(
1345                 struct device_node *np_emif, struct device *dev)
1346 {
1347         struct emif_data                *emif = NULL;
1348         struct ddr_device_info          *dev_info = NULL;
1349         struct emif_platform_data       *pd = NULL;
1350         struct device_node              *np_ddr;
1351         int                             len;
1352
1353         np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
1354         if (!np_ddr)
1355                 goto error;
1356         emif    = devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
1357         pd      = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1358         dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
1359
1360         if (!emif || !pd || !dev_info) {
1361                 dev_err(dev, "%s: Out of memory!!\n",
1362                         __func__);
1363                 goto error;
1364         }
1365
1366         emif->plat_data         = pd;
1367         pd->device_info         = dev_info;
1368         emif->dev               = dev;
1369         emif->np_ddr            = np_ddr;
1370         emif->temperature_level = SDRAM_TEMP_NOMINAL;
1371
1372         if (of_device_is_compatible(np_emif, "ti,emif-4d"))
1373                 emif->plat_data->ip_rev = EMIF_4D;
1374         else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
1375                 emif->plat_data->ip_rev = EMIF_4D5;
1376
1377         of_property_read_u32(np_emif, "phy-type", &pd->phy_type);
1378
1379         if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
1380                 pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
1381
1382         of_get_ddr_info(np_emif, np_ddr, dev_info);
1383         if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
1384                         pd->device_info->io_width, pd->phy_type, pd->ip_rev,
1385                         emif->dev)) {
1386                 dev_err(dev, "%s: invalid device data!!\n", __func__);
1387                 goto error;
1388         }
1389         /*
1390          * For EMIF instances other than EMIF1, see if the devices connected
1391          * are exactly the same as those on EMIF1 (which is typically the
1392          * case). If so, mark this instance as a duplicate of EMIF1. This
1393          * saves some memory and computation.
1394          */
1395         if (emif1 && emif1->np_ddr == np_ddr) {
1396                 emif->duplicate = true;
1397                 goto out;
1398         } else if (emif1) {
1399                 dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
1400                         __func__);
1401         }
1402
1403         of_get_custom_configs(np_emif, emif);
1404         emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
1405                                         emif->plat_data->device_info->type,
1406                                         &emif->plat_data->timings_arr_size);
1407
1408         emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
1409         goto out;
1410
1411 error:
1412         return NULL;
1413 out:
1414         return emif;
1415 }
1416
1417 #else
1418
1419 static struct emif_data * __init_or_module of_get_memory_device_details(
1420                 struct device_node *np_emif, struct device *dev)
1421 {
1422         return NULL;
1423 }
1424 #endif
1425
1426 static struct emif_data * __init_or_module get_device_details(
1427                 struct platform_device *pdev)
1428 {
1429         u32                             size;
1430         struct emif_data                *emif = NULL;
1431         struct ddr_device_info          *dev_info;
1432         struct emif_custom_configs      *cust_cfgs;
1433         struct emif_platform_data       *pd;
1434         struct device                   *dev;
1435         void                            *temp;
1436
1437         pd = pdev->dev.platform_data;
1438         dev = &pdev->dev;
1439
1440         if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
1441                         pd->device_info->density, pd->device_info->io_width,
1442                         pd->phy_type, pd->ip_rev, dev))) {
1443                 dev_err(dev, "%s: invalid device data\n", __func__);
1444                 goto error;
1445         }
1446
1447         emif    = devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
1448         temp    = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1449         dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
1450
1451         if (!emif || !temp || !dev_info) {
1452                 dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
1453                 goto error;
1454         }
1455
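             /*
              * Deep-copy the platform data and device info below so the
              * driver owns devm-managed copies that are independent of the
              * caller's (possibly __initdata) structures.
              */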
1456         memcpy(temp, pd, sizeof(*pd));
1457         pd = temp;
1458         memcpy(dev_info, pd->device_info, sizeof(*dev_info));
1459
1460         pd->device_info         = dev_info;
1461         emif->plat_data         = pd;
1462         emif->dev               = dev;
1463         emif->temperature_level = SDRAM_TEMP_NOMINAL;
1464
1465         /*
1466          * For EMIF instances other than EMIF1, see if the devices connected
1467          * are exactly the same as those on EMIF1 (which is typically the
1468          * case). If so, mark this instance as a duplicate of EMIF1 and
1469          * skip copying the timings data; this saves memory and computation.
1470          */
1471         emif->duplicate = emif1 && (memcmp(dev_info,
1472                 emif1->plat_data->device_info,
1473                 sizeof(struct ddr_device_info)) == 0);
1474
1475         if (emif->duplicate) {
1476                 pd->timings = NULL;
1477                 pd->min_tck = NULL;
1478                 goto out;
1479         } else if (emif1) {
1480                 dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
1481                         __func__);
1482         }
1483
1484         /*
1485          * Copy custom configs - ignore any allocation error, as
1486          * custom_configs is optional and not critical
1487          */
1488         cust_cfgs = pd->custom_configs;
1489         if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
1490                 temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
1491                 if (temp)
1492                         memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
1493                 else
1494                         dev_warn(dev, "%s:%d: allocation error\n", __func__,
1495                                 __LINE__);
1496                 pd->custom_configs = temp;
1497         }
1498
1499         /*
1500          * Copy timings and min-tck values from platform data. If they are
1501          * not available, or if memory allocation fails, use JEDEC defaults.
1502          */
1503         size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
1504         if (pd->timings) {
1505                 temp = devm_kzalloc(dev, size, GFP_KERNEL);
1506                 if (temp) {
1507                         memcpy(temp, pd->timings, size);
1508                         pd->timings = temp;
1509                 } else {
1510                         dev_warn(dev, "%s:%d: allocation error\n", __func__,
1511                                 __LINE__);
1512                         get_default_timings(emif);
1513                 }
1514         } else {
1515                 get_default_timings(emif);
1516         }
1517
1518         if (pd->min_tck) {
1519                 temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
1520                 if (temp) {
1521                         memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
1522                         pd->min_tck = temp;
1523                 } else {
1524                         dev_warn(dev, "%s:%d: allocation error\n", __func__,
1525                                 __LINE__);
1526                         pd->min_tck = &lpddr2_jedec_min_tck;
1527                 }
1528         } else {
1529                 pd->min_tck = &lpddr2_jedec_min_tck;
1530         }
1531
1532 out:
1533         return emif;
1534
1535 error:
1536         return NULL;
1537 }
1538
1539 static int __init_or_module emif_probe(struct platform_device *pdev)
1540 {
1541         struct emif_data        *emif;
1542         struct resource         *res;
1543         int                     irq;
1544
1545         if (pdev->dev.of_node)
1546                 emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
1547         else
1548                 emif = get_device_details(pdev);
1549
1550         if (!emif) {
1551                 pr_err("%s: error getting device data\n", __func__);
1552                 goto error;
1553         }
1554
1555         list_add(&emif->node, &device_list);
1556         emif->addressing = get_addressing_table(emif->plat_data->device_info);
1557
1558         /* Save pointers to each other in emif and device structures */
1559         emif->dev = &pdev->dev;
1560         platform_set_drvdata(pdev, emif);
1561
1562         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1563         emif->base = devm_ioremap_resource(emif->dev, res);
1564         if (IS_ERR(emif->base))
1565                 goto error;
1566
1567         irq = platform_get_irq(pdev, 0);
1568         if (irq < 0) {
1569                 dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
1570                         __func__, irq);
1571                 goto error;
1572         }
1573
1574         emif_onetime_settings(emif);
1575         emif_debugfs_init(emif);
1576         disable_and_clear_all_interrupts(emif);
1577         setup_interrupts(emif, irq);
1578
1579         /* One-time actions taken on probing the first device */
1580         if (!emif1) {
1581                 emif1 = emif;
1582                 spin_lock_init(&emif_lock);
1583
1584                 /*
1585                  * TODO: register notifiers for frequency and voltage
1586                  * change here once the respective frameworks are
1587                  * available
1588                  */
1589         }
1590
1591         dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
1592                 __func__, emif->base, irq);
1593
1594         return 0;
1595 error:
1596         return -ENODEV;
1597 }
1598
1599 static int __exit emif_remove(struct platform_device *pdev)
1600 {
1601         struct emif_data *emif = platform_get_drvdata(pdev);
1602
1603         emif_debugfs_exit(emif);
1604
1605         return 0;
1606 }
1607
1608 static void emif_shutdown(struct platform_device *pdev)
1609 {
1610         struct emif_data        *emif = platform_get_drvdata(pdev);
1611
1612         disable_and_clear_all_interrupts(emif);
1613 }
1614
1615 static int get_emif_reg_values(struct emif_data *emif, u32 freq,
1616                 struct emif_regs *regs)
1617 {
1618         u32                             cs1_used, ip_rev, phy_type;
1619         u32                             cl, type;
1620         const struct lpddr2_timings     *timings;
1621         const struct lpddr2_min_tck     *min_tck;
1622         const struct ddr_device_info    *device_info;
1623         const struct lpddr2_addressing  *addressing;
1624         struct emif_data                *emif_for_calc;
1625         struct device                   *dev;
1626         const struct emif_custom_configs *custom_configs;
1627
1628         dev = emif->dev;
1629         /*
1630          * If the devices on this EMIF instance are a duplicate of those
1631          * on EMIF1, use EMIF1's details for the calculation.
1632          */
1633         emif_for_calc   = emif->duplicate ? emif1 : emif;
1634         timings         = get_timings_table(emif_for_calc, freq);
1635         addressing      = emif_for_calc->addressing;
1636         if (!timings || !addressing) {
1637                 dev_err(dev, "%s: not enough data available for %uHz\n",
1638                         __func__, freq);
1639                 return -1;
1640         }
1641
1642         device_info     = emif_for_calc->plat_data->device_info;
1643         type            = device_info->type;
1644         cs1_used        = device_info->cs1_used;
1645         ip_rev          = emif_for_calc->plat_data->ip_rev;
1646         phy_type        = emif_for_calc->plat_data->phy_type;
1647
1648         min_tck         = emif_for_calc->plat_data->min_tck;
1649         custom_configs  = emif_for_calc->plat_data->custom_configs;
1650
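             /*
              * set_ddr_clk_period() presumably caches the DDR clock period
              * for this frequency; the get_sdram_tim_*_shdw() helpers below
              * rely on it to convert ns-based timings into clock cycles.
              */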
1651         set_ddr_clk_period(freq);
1652
1653         regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing);
1654         regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck,
1655                         addressing);
1656         regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck,
1657                         addressing, type);
1658         regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck,
1659                 addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);
1660
1661         cl = get_cl(emif);
1662
1663         if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
1664                 regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
1665                         timings, freq, cl);
1666         } else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
1667                 regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
1668                 regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
1669                 regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
1670                 regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
1671         } else {
1672                 return -1;
1673         }
1674
1675         /* Only timeout values in pwr_mgmt_ctrl_shdw register */
1676         regs->pwr_mgmt_ctrl_shdw =
1677                 get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
1678                 (CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);
1679
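             /*
              * Note: bitwise tests - these rely on EMIF_4D and EMIF_4D5
              * being distinct single-bit values so that exactly one of the
              * two branches can match.
              */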
1680         if (ip_rev & EMIF_4D) {
1681                 regs->read_idle_ctrl_shdw_normal =
1682                         get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);
1683
1684                 regs->read_idle_ctrl_shdw_volt_ramp =
1685                         get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
1686         } else if (ip_rev & EMIF_4D5) {
1687                 regs->dll_calib_ctrl_shdw_normal =
1688                         get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);
1689
1690                 regs->dll_calib_ctrl_shdw_volt_ramp =
1691                         get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
1692         }
1693
1694         if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
1695                 regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
1696                         addressing);
1697
1698                 regs->sdram_tim1_shdw_derated =
1699                         get_sdram_tim_1_shdw_derated(timings, min_tck,
1700                                 addressing);
1701
1702                 regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
1703                         min_tck, addressing, type, ip_rev,
1704                         EMIF_DERATED_TIMINGS);
1705         }
1706
1707         regs->freq = freq;
1708
1709         return 0;
1710 }
1711
1712 /*
1713  * get_regs() - gets the cached emif_regs structure for a given EMIF instance
1714  * and a given frequency (freq):
1715  *
1716  * As an optimisation, every EMIF instance other than EMIF1 shares the
1717  * register cache with EMIF1 if the devices connected to this instance
1718  * are the same as those on EMIF1 (indicated by the duplicate flag).
1719  *
1720  * If we do not have an entry corresponding to the given frequency, we
1721  * allocate a new entry and calculate the values.
1722  *
1723  * Upon finding the right register dump, save it in curr_regs. It can then
1724  * be used directly for thermal de-rating and voltage-ramp changes.
1725  */
1726 static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
1727 {
1728         int                     i;
1729         struct emif_regs        **regs_cache;
1730         struct emif_regs        *regs = NULL;
1731         struct device           *dev;
1732
1733         dev = emif->dev;
1734         if (emif->curr_regs && emif->curr_regs->freq == freq) {
1735                 dev_dbg(dev, "%s: using curr_regs - %u Hz\n", __func__, freq);
1736                 return emif->curr_regs;
1737         }
1738
1739         if (emif->duplicate)
1740                 regs_cache = emif1->regs_cache;
1741         else
1742                 regs_cache = emif->regs_cache;
1743
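             /*
              * The cache is filled front-to-back (see the insertion code
              * below), so the scan can stop at the first NULL entry.
              */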
1744         for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
1745                 if (regs_cache[i]->freq == freq) {
1746                         regs = regs_cache[i];
1747                         dev_dbg(dev,
1748                                 "%s: reg dump found in reg cache for %u Hz\n",
1749                                 __func__, freq);
1750                         break;
1751                 }
1752         }
1753
1754         /*
1755          * If we don't have an entry for this frequency in the cache,
1756          * create one and calculate the values.
1757          */
1758         if (!regs) {
1759                 regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
1760                 if (!regs)
1761                         return NULL;
1762
1763                 if (get_emif_reg_values(emif, freq, regs)) {
1764                         devm_kfree(emif->dev, regs);
1765                         return NULL;
1766                 }
1767
1768                 /*
1769                  * Now look for an unused entry in the cache and save the
1770                  * newly created struct there. If there are no free
1771                  * entries, overwrite the last entry.
1772                  */
1773                 for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
1774                         ;
1775
1776                 if (i >= EMIF_MAX_NUM_FREQUENCIES) {
1777                         dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
1778                                 __func__);
1779                         i = EMIF_MAX_NUM_FREQUENCIES - 1;
1780                         devm_kfree(emif->dev, regs_cache[i]);
1781                 }
1782                 regs_cache[i] = regs;
1783         }
1784
1785         return regs;
1786 }
1787
1788 static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
1789 {
1790         dev_dbg(emif->dev, "%s: voltage notification: %d\n", __func__,
1791                 volt_state);
1792
1793         if (!emif->curr_regs) {
1794                 dev_err(emif->dev,
1795                         "%s: volt-notify before registers are ready: %d\n",
1796                         __func__, volt_state);
1797                 return;
1798         }
1799
1800         setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
1801 }
1802
1803 /*
1804  * TODO: voltage notify handling should be hooked up to the
1805  * regulator framework as soon as the necessary support is
1806  * available in the mainline kernel. This function is unused
1807  * right now.
1808  */
1809 static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
1810 {
1811         struct emif_data *emif;
1812
1813         spin_lock_irqsave(&emif_lock, irq_state);
1814
1815         list_for_each_entry(emif, &device_list, node)
1816                 do_volt_notify_handling(emif, volt_state);
1817         do_freq_update();
1818
1819         spin_unlock_irqrestore(&emif_lock, irq_state);
1820 }
1821
1822 static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
1823 {
1824         struct emif_regs *regs;
1825
1826         regs = get_regs(emif, new_freq);
1827         if (!regs)
1828                 return;
1829
1830         emif->curr_regs = regs;
1831
1832         /*
1833          * Update the shadow registers:
1834          * Temperature and voltage-ramp sensitive settings are also
1835          * configured in terms of DDR cycles, so they too must be
1836          * updated on a frequency change.
1837          */
1838         dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz\n",
1839                 __func__, new_freq);
1840         setup_registers(emif, regs);
1841         setup_temperature_sensitive_regs(emif, regs);
1842         setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);
1843
1844         /*
1845          * Part of workaround for errata i728. See do_freq_update()
1846          * for more details
1847          */
1848         if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
1849                 set_lpmode(emif, EMIF_LP_MODE_DISABLE);
1850 }
1851
1852 /*
1853  * TODO: frequency notify handling should be hooked up to the
1854  * clock framework as soon as the necessary support is
1855  * available in the mainline kernel. This function is unused
1856  * right now.
1857  */
1858 static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
1859 {
1860         struct emif_data *emif;
1861
1862         /*
1863          * NOTE: we take the spin-lock here and release it only in the
1864          * post-notifier. This doesn't look good, and Sparse complains
1865          * about it, but it seems unavoidable: we need to lock a
1866          * sequence of events that is split between the EMIF driver
1867          * and the clock framework.
1868          *
1869          * 1. EMIF driver updates EMIF timings in shadow registers in the
1870          *    frequency pre-notify callback from the clock framework
1871          * 2. clock framework sets up the registers for the new frequency
1872          * 3. clock framework initiates a hw-sequence that updates the
1873          *    frequency and the EMIF timings synchronously
1874          *
1875          * All three steps must be performed as one atomic operation
1876          * vis-a-vis the similar sequence in the EMIF interrupt handler
1877          * for temperature events. Otherwise, there could be race
1878          * conditions that result in incorrect EMIF timings for a
1879          * given frequency.
1880          */
1881         spin_lock_irqsave(&emif_lock, irq_state);
1882
1883         list_for_each_entry(emif, &device_list, node)
1884                 do_freq_pre_notify_handling(emif, new_freq);
1885 }
1886
1887 static void do_freq_post_notify_handling(struct emif_data *emif)
1888 {
1889         /*
1890          * Part of workaround for errata i728. See do_freq_update()
1891          * for more details
1892          */
1893         if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
1894                 set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
1895 }
1896
1897 /*
1898  * TODO: frequency notify handling should be hooked up to the
1899  * clock framework as soon as the necessary support is
1900  * available in the mainline kernel. This function is unused
1901  * right now.
1902  */
1903 static void __attribute__((unused)) freq_post_notify_handling(void)
1904 {
1905         struct emif_data *emif;
1906
1907         list_for_each_entry(emif, &device_list, node)
1908                 do_freq_post_notify_handling(emif);
1909
1910         /*
1911          * Lock is done in pre-notify handler. See freq_pre_notify_handling()
1912          * for more details
1913          */
1914         spin_unlock_irqrestore(&emif_lock, irq_state);
1915 }
1916
1917 #if defined(CONFIG_OF)
1918 static const struct of_device_id emif_of_match[] = {
1919                 { .compatible = "ti,emif-4d" },
1920                 { .compatible = "ti,emif-4d5" },
1921                 {},
1922 };
1923 MODULE_DEVICE_TABLE(of, emif_of_match);
1924 #endif
1925
1926 static struct platform_driver emif_driver = {
1927         .remove         = __exit_p(emif_remove),
1928         .shutdown       = emif_shutdown,
1929         .driver = {
1930                 .name = "emif",
1931                 .of_match_table = of_match_ptr(emif_of_match),
1932         },
1933 };
1934
1935 module_platform_driver_probe(emif_driver, emif_probe);
1936
1937 MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
1938 MODULE_LICENSE("GPL");
1939 MODULE_ALIAS("platform:emif");
1940 MODULE_AUTHOR("Texas Instruments Inc");