/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
        struct nand_chip        chip;

        struct device           *dev;
        struct clk              *clk;

        bool                    is_readmode;

        void __iomem            *base;
        void __iomem            *vaddr;

        uint32_t                ioaddr;
        uint32_t                current_cs;

        uint32_t                mask_chipsel;
        uint32_t                mask_ale;
        uint32_t                mask_cle;

        uint32_t                core_chipsel;

        struct davinci_aemif_timing     *timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
                int offset)
{
        return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
                int offset, unsigned long value)
{
        __raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * Access to hardware control lines:  ALE, CLE, secondary chipselect.
 */

static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
                                   unsigned int ctrl)
{
        struct davinci_nand_info        *info = to_davinci_nand(mtd);
        uint32_t                        addr = info->current_cs;
        struct nand_chip                *nand = mtd_to_nand(mtd);

        /* Did the control lines change? */
        if (ctrl & NAND_CTRL_CHANGE) {
                if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
                        addr |= info->mask_cle;
                else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
                        addr |= info->mask_ale;

                nand->IO_ADDR_W = (void __iomem __force *)addr;
        }

        if (cmd != NAND_CMD_NONE)
                iowrite8(cmd, nand->IO_ADDR_W);
}
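
/*
 * Worked example, assuming the usual nand-boot wiring (MASK_ALE = 0x08,
 * MASK_CLE = 0x10): data bytes go to the chipselect base address, command
 * bytes to base + 0x10, and address bytes to base + 0x08, so the AEMIF
 * address lines double as the NAND control signals.
 */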

static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
        struct davinci_nand_info        *info = to_davinci_nand(mtd);
        uint32_t                        addr = info->ioaddr;

        /* maybe kick in a second chipselect */
        if (chip > 0)
                addr |= info->mask_chipsel;
        info->current_cs = addr;

        info->chip.IO_ADDR_W = (void __iomem __force *)addr;
        info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}
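
/*
 * The "second chipselect" is not a separate AEMIF chipselect: mask_chipsel
 * is an extra address bit (the cover comment mentions A12-based schemes),
 * so the second die simply appears at a fixed offset inside the same data
 * window.
 */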

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDF1ECC_OFFSET
                        + 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info;
        uint32_t nandcfr;
        unsigned long flags;

        info = to_davinci_nand(mtd);

        /* Reset ECC hardware */
        nand_davinci_readecc_1bit(mtd);

        spin_lock_irqsave(&davinci_nand_lock, flags);

        /* Restart ECC hardware */
        nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
        nandcfr |= BIT(8 + info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
                                      const u_char *dat, u_char *ecc_code)
{
        unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
        unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

        /* invert so that erased block ecc is correct */
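        /* (an erased page reads as all 0xFF, for which the engine computes
         * all-zero parity; inverting makes the stored ECC match the 0xFF
         * bytes found in a blank OOB)
         */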
        ecc24 = ~ecc24;
        ecc_code[0] = (u_char)(ecc24);
        ecc_code[1] = (u_char)(ecc24 >> 8);
        ecc_code[2] = (u_char)(ecc24 >> 16);

        return 0;
}

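/*
 * Compare the stored and freshly computed 3-byte codes.  For a single bad
 * data bit, the two 12-bit halves of the XOR difference are exact bitwise
 * complements; bits [14:12] of the difference then select the bit within
 * the byte and bits [23:15] the byte offset, which is what the correction
 * below relies on.
 */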
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
                                     u_char *read_ecc, u_char *calc_ecc)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
                                          (read_ecc[2] << 16);
        uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
                                          (calc_ecc[2] << 16);
        uint32_t diff = eccCalc ^ eccNand;

        if (diff) {
                if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
                        /* Correctable error */
                        if ((diff >> (12 + 3)) < chip->ecc.size) {
                                dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
                                return 1;
                        } else {
                                return -EBADMSG;
                        }
                } else if (!(diff & (diff - 1))) {
                        /* Single bit ECC error in the ECC itself,
                         * nothing to fix */
                        return 1;
                } else {
                        /* Uncorrectable error */
                        return -EBADMSG;
                }

        }
        return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&davinci_nand_lock, flags);

        /* Start 4-bit ECC calculation for read/write */
        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val &= ~(0x03 << 4);
        val |= (info->core_chipsel << 4) | BIT(12);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        info->is_readmode = (mode == NAND_ECC_READ);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
        const u32 mask = 0x03ff03ff;

        code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
        code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
        code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
        code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
                const u_char *dat, u_char *ecc_code)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        u32 raw_ecc[4], *p;
        unsigned i;

        /* After a read, terminate ECC calculation by a dummy read
         * of some 4-bit ECC register.  ECC covers everything that
         * was read; correct() just uses the hardware state, so
         * ecc_code is not needed.
         */
        if (info->is_readmode) {
                davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
                return 0;
        }

        /* Pack eight raw 10-bit ecc values into ten bytes, making
         * two passes which each convert four values (in upper and
         * lower halves of two 32-bit words) into five bytes.  The
         * ROM boot loader uses this same packing scheme.
         */
        nand_davinci_readecc_4bit(info, raw_ecc);
        for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
                *ecc_code++ =   p[0]        & 0xff;
                *ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
                *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
                *ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
                *ecc_code++ =  (p[1] >> 18) & 0xff;
        }

        return 0;
}
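
/*
 * Net result of the packing above: the eight 10-bit values are concatenated
 * LSB-first into an 80-bit stream and stored as ten bytes, which is exactly
 * the layout nand_davinci_correct_4bit() unpacks (and the one the ROM boot
 * loader expects).
 */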

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
                u_char *data, u_char *ecc_code, u_char *null)
{
        int i;
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned short ecc10[8];
        unsigned short *ecc16;
        u32 syndrome[4];
        u32 ecc_state;
        unsigned num_errors, corrected;
        unsigned long timeo;

        /* Unpack ten bytes into eight 10 bit values.  We know we're
         * little-endian, and use type punning for less shifting/masking.
         */
        if (WARN_ON(0x01 & (unsigned) ecc_code))
                return -EINVAL;
        ecc16 = (unsigned short *)ecc_code;

        ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
        ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
        ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
        ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
        ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
        ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
        ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
        ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

        /* Tell ECC controller about the expected ECC codes. */
        for (i = 7; i >= 0; i--)
                davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

        /* Allow time for syndrome calculation ... then read it.
         * A syndrome of all zeroes means no detected errors.
         */
        davinci_nand_readl(info, NANDFSR_OFFSET);
        nand_davinci_readecc_4bit(info, syndrome);
        if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
                return 0;

        /*
         * Clear any previous address calculation by doing a dummy read of an
         * error address register.
         */
        davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

        /* Start address calculation, and wait for it to complete.
         * We _could_ start reading more data while this is working,
         * to speed up the overall page read.
         */
        davinci_nand_writel(info, NANDFCR_OFFSET,
                        davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

        /*
         * ECC_STATE field reads 0x3 (Error correction complete) immediately
         * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
         * begin trying to poll for the state, you may fall right out of your
         * loop without any of the correction calculations having taken place.
         * The recommendation from the hardware team is to initially delay as
         * long as ECC_STATE reads less than 4. After that, ECC HW has entered
         * correction state.
         */
        timeo = jiffies + usecs_to_jiffies(100);
        do {
                ecc_state = (davinci_nand_readl(info,
                                NANDFSR_OFFSET) >> 8) & 0x0f;
                cpu_relax();
        } while ((ecc_state < 4) && time_before(jiffies, timeo));

        for (;;) {
                u32     fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

                switch ((fsr >> 8) & 0x0f) {
                case 0:         /* no error, should not happen */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return 0;
                case 1:         /* five or more errors detected */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return -EBADMSG;
                case 2:         /* error addresses computed */
                case 3:
                        num_errors = 1 + ((fsr >> 16) & 0x03);
                        goto correct;
                default:        /* still working on it */
                        cpu_relax();
                        continue;
                }
        }

correct:
        /* correct each error */
        for (i = 0, corrected = 0; i < num_errors; i++) {
                int error_address, error_value;

                if (i > 1) {
                        error_address = davinci_nand_readl(info,
                                                NAND_ERR_ADD2_OFFSET);
                        error_value = davinci_nand_readl(info,
                                                NAND_ERR_ERRVAL2_OFFSET);
                } else {
                        error_address = davinci_nand_readl(info,
                                                NAND_ERR_ADD1_OFFSET);
                        error_value = davinci_nand_readl(info,
                                                NAND_ERR_ERRVAL1_OFFSET);
                }

                if (i & 1) {
                        error_address >>= 16;
                        error_value >>= 16;
                }
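                /*
                 * The controller reports reversed byte offsets; after this
                 * conversion, offsets of 512 or more fall in the ECC bytes
                 * rather than the data buffer and are simply skipped.
                 */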
                error_address &= 0x3ff;
                error_address = (512 + 7) - error_address;

                if (error_address < 512) {
                        data[error_address] ^= error_value;
                        corrected++;
                }
        }

        return corrected;
}

/*----------------------------------------------------------------------*/

/*
 * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
 * how these chips are normally wired.  This translates to both 8 and 16
 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
 *
 * For now we assume that configuration, or any other one which ignores
 * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
 * and have that transparently morphed into multiple NAND operations.
 */
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                ioread8_rep(chip->IO_ADDR_R, buf, len);
}

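/*
 * Writes go through IO_ADDR_R as well; select_chip() keeps IO_ADDR_R and
 * IO_ADDR_W pointing at the same data window, and by the time the core
 * calls write_buf() the ALE/CLE bits have already been dropped from
 * IO_ADDR_W, so the two addresses are identical here.
 */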
static void nand_davinci_write_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                iowrite8_rep(chip->IO_ADDR_R, buf, len);
}

/*
 * Check hardware register for wait status. Returns 1 if device is ready,
 * 0 if it is still busy.
 */
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
                                      struct mtd_oob_region *oobregion)
{
        if (section > 2)
                return -ERANGE;

        if (!section) {
                oobregion->offset = 0;
                oobregion->length = 5;
        } else if (section == 1) {
                oobregion->offset = 6;
                oobregion->length = 2;
        } else {
                oobregion->offset = 13;
                oobregion->length = 3;
        }

        return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *oobregion)
{
        if (section > 1)
                return -ERANGE;

        if (!section) {
                oobregion->offset = 8;
                oobregion->length = 5;
        } else {
                oobregion->offset = 16;
                oobregion->length = mtd->oobsize - 16;
        }

        return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
        .ecc = hwecc4_ooblayout_small_ecc,
        .free = hwecc4_ooblayout_small_free,
};
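
/*
 * Resulting map for a 16-byte small-page OOB:
 *
 *    0.. 4   ECC bytes 0-4
 *    5       manufacturer bad block marker
 *    6.. 7   ECC bytes 5-6
 *    8..12   free (a flash-based BBT marker fits here)
 *   13..15   ECC bytes 7-9
 */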

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
        {.compatible = "ti,keystone-nand", },
        {},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
                struct davinci_nand_pdata *pdata;
                const char *mode;
                u32 prop;

                pdata = devm_kzalloc(&pdev->dev,
                                sizeof(struct davinci_nand_pdata),
                                GFP_KERNEL);
                pdev->dev.platform_data = pdata;
                if (!pdata)
                        return ERR_PTR(-ENOMEM);
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-chipselect", &prop))
                        pdev->id = prop;
                else
                        return ERR_PTR(-EINVAL);

                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-ale", &prop))
                        pdata->mask_ale = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-cle", &prop))
                        pdata->mask_cle = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-chipsel", &prop))
                        pdata->mask_chipsel = prop;
                if (!of_property_read_string(pdev->dev.of_node,
                        "ti,davinci-ecc-mode", &mode)) {
                        if (!strncmp("none", mode, 4))
                                pdata->ecc_mode = NAND_ECC_NONE;
                        if (!strncmp("soft", mode, 4))
                                pdata->ecc_mode = NAND_ECC_SOFT;
                        if (!strncmp("hw", mode, 2))
                                pdata->ecc_mode = NAND_ECC_HW;
                }
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-ecc-bits", &prop))
                        pdata->ecc_bits = prop;

                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-nand-buswidth", &prop) && prop == 16)
                        pdata->options |= NAND_BUSWIDTH_16;

                if (of_property_read_bool(pdev->dev.of_node,
                        "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;

                if (of_device_is_compatible(pdev->dev.of_node,
                                            "ti,keystone-nand")) {
                        pdata->options |= NAND_NO_SUBPAGE_WRITE;
                }
        }

        return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        return dev_get_platdata(&pdev->dev);
}
#endif
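
/*
 * Illustrative device tree fragment (the property names are the ones parsed
 * above; the values and the reg/unit-address details are board specific):
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0x08>;
 *		ti,davinci-mask-cle = <0x10>;
 *		ti,davinci-mask-chipsel = <0>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */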

static int nand_davinci_probe(struct platform_device *pdev)
{
        struct davinci_nand_pdata       *pdata;
        struct davinci_nand_info        *info;
        struct resource                 *res1;
        struct resource                 *res2;
        void __iomem                    *vaddr;
        void __iomem                    *base;
        int                             ret;
        uint32_t                        val;
        struct mtd_info                 *mtd;

        pdata = nand_davinci_get_pdata(pdev);
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);

        /* insist on board-specific configuration */
        if (!pdata)
                return -ENODEV;

        /* which external chipselect will we be managing? */
        if (pdev->id < 0 || pdev->id > 3)
                return -ENODEV;

        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        platform_set_drvdata(pdev, info);

        res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res1 || !res2) {
                dev_err(&pdev->dev, "resource missing\n");
                return -EINVAL;
        }
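
        /*
         * Resource 0 is the NAND data window in AEMIF chipselect space
         * (commands, addresses and data all go through it); resource 1 is
         * the AEMIF control register block used below for NANDFCR, ECC and
         * status access.
         */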
        vaddr = devm_ioremap_resource(&pdev->dev, res1);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /*
         * This register range is used to set up NAND settings. When the TI
         * AEMIF driver is in use, the same memory address range has already
         * been requested by AEMIF, so we cannot request it twice; just
         * ioremap it.  The AEMIF and NAND drivers do not use the same
         * registers within this range.
         */
        base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
        if (!base) {
                dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
                return -EADDRNOTAVAIL;
        }

        info->dev               = &pdev->dev;
        info->base              = base;
        info->vaddr             = vaddr;

        mtd                     = nand_to_mtd(&info->chip);
        mtd->dev.parent         = &pdev->dev;
        nand_set_flash_node(&info->chip, pdev->dev.of_node);

        info->chip.IO_ADDR_R    = vaddr;
        info->chip.IO_ADDR_W    = vaddr;
        info->chip.chip_delay   = 0;
        info->chip.select_chip  = nand_davinci_select_chip;

        /* options such as NAND_BBT_USE_FLASH */
        info->chip.bbt_options  = pdata->bbt_options;
        /* options such as 16-bit widths */
        info->chip.options      = pdata->options;
        info->chip.bbt_td       = pdata->bbt_td;
        info->chip.bbt_md       = pdata->bbt_md;
        info->timing            = pdata->timing;

        info->ioaddr            = (uint32_t __force) vaddr;

        info->current_cs        = info->ioaddr;
        info->core_chipsel      = pdev->id;
        info->mask_chipsel      = pdata->mask_chipsel;

        /* use nandboot-capable ALE/CLE masks by default */
        info->mask_ale          = pdata->mask_ale ? : MASK_ALE;
        info->mask_cle          = pdata->mask_cle ? : MASK_CLE;

        /* Set address of hardware control function */
        info->chip.cmd_ctrl     = nand_davinci_hwcontrol;
        info->chip.dev_ready    = nand_davinci_dev_ready;

        /* Speed up buffer I/O */
        info->chip.read_buf     = nand_davinci_read_buf;
        info->chip.write_buf    = nand_davinci_write_buf;

        /* Use board-specific ECC config */
        info->chip.ecc.mode     = pdata->ecc_mode;

        ret = -EINVAL;

        info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(info->clk);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
                        ret);
                goto err_clk_enable;
        }

        spin_lock_irq(&davinci_nand_lock);

        /* put CSxNAND into NAND mode */
        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val |= BIT(info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        spin_unlock_irq(&davinci_nand_lock);

        /* Scan to find existence of the device(s) */
        ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
                goto err;
        }

        switch (info->chip.ecc.mode) {
        case NAND_ECC_NONE:
                pdata->ecc_bits = 0;
                break;
        case NAND_ECC_SOFT:
                pdata->ecc_bits = 0;
                /*
                 * This driver expects Hamming based ECC when ecc_mode is set
                 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
                 * avoid adding an extra ->ecc_algo field to
                 * davinci_nand_pdata.
                 */
                info->chip.ecc.algo = NAND_ECC_HAMMING;
                break;
        case NAND_ECC_HW:
                if (pdata->ecc_bits == 4) {
                        /* No sanity checks:  CPUs must support this,
                         * and the chips may not use NAND_BUSWIDTH_16.
                         */

                        /* No sharing 4-bit hardware between chipselects yet */
                        spin_lock_irq(&davinci_nand_lock);
                        if (ecc4_busy)
                                ret = -EBUSY;
                        else
                                ecc4_busy = true;
                        spin_unlock_irq(&davinci_nand_lock);

                        if (ret == -EBUSY)
                                return ret;

                        info->chip.ecc.calculate = nand_davinci_calculate_4bit;
                        info->chip.ecc.correct = nand_davinci_correct_4bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
                        info->chip.ecc.bytes = 10;
                        info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
                } else {
                        info->chip.ecc.calculate = nand_davinci_calculate_1bit;
                        info->chip.ecc.correct = nand_davinci_correct_1bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
                        info->chip.ecc.bytes = 3;
                }
                info->chip.ecc.size = 512;
                info->chip.ecc.strength = pdata->ecc_bits;
                break;
        default:
                return -EINVAL;
        }

        /* Update ECC layout if needed ... for 1-bit HW ECC, the default
         * is OK, but it allocates 6 bytes when only 3 are needed (for
         * each 512 bytes).  For the 4-bit HW ECC, that default is not
         * usable:  10 bytes are needed, not 6.
         */
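        /* For large-page chips the standard large-page layout is used and
         * the mode switches to NAND_ECC_HW_OOB_FIRST, so the stored ECC
         * bytes are read from the OOB area before the data is read and
         * corrected.
         */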
        if (pdata->ecc_bits == 4) {
                int     chunks = mtd->writesize / 512;

                if (!chunks || mtd->oobsize < 16) {
                        dev_dbg(&pdev->dev, "too small\n");
                        ret = -EINVAL;
                        goto err;
                }

                /* For small page chips, preserve the manufacturer's
                 * badblock marking data ... and make sure a flash BBT
                 * table marker fits in the free bytes.
                 */
                if (chunks == 1) {
                        mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
                } else if (chunks == 4 || chunks == 8) {
                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
                        info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
                } else {
                        ret = -EIO;
                        goto err;
                }
        }

        ret = nand_scan_tail(mtd);
        if (ret < 0)
                goto err;

        if (pdata->parts)
                ret = mtd_device_parse_register(mtd, NULL, NULL,
                                        pdata->parts, pdata->nr_parts);
        else
                ret = mtd_device_register(mtd, NULL, 0);
        if (ret < 0)
                goto err;

        val = davinci_nand_readl(info, NRCSR_OFFSET);
        dev_info(&pdev->dev, "controller rev. %d.%d\n",
               (val >> 8) & 0xff, val & 0xff);

        return 0;

err:
        clk_disable_unprepare(info->clk);

err_clk_enable:
        spin_lock_irq(&davinci_nand_lock);
        /* release the 4-bit ECC engine only if this device claimed it */
        if (info->chip.ecc.bytes == 10)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
        return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
        struct davinci_nand_info *info = platform_get_drvdata(pdev);

        spin_lock_irq(&davinci_nand_lock);
        /* release the 4-bit ECC engine only if this device claimed it */
        if (info->chip.ecc.bytes == 10)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);

        nand_release(nand_to_mtd(&info->chip));

        clk_disable_unprepare(info->clk);

        return 0;
}

static struct platform_driver nand_davinci_driver = {
        .probe          = nand_davinci_probe,
        .remove         = nand_davinci_remove,
        .driver         = {
                .name   = "davinci_nand",
                .of_match_table = of_match_ptr(davinci_nand_of_match),
        },
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");
