/*
 * Imported from the Linux kernel tree (merge tag 'edac/v4.3-1'):
 * drivers/mtd/nand/pxa3xx_nand.c
 */
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
24 #include <linux/io.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
28 #include <linux/of.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
31
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
33 #define ARCH_HAS_DMA
34 #endif
35
36 #ifdef ARCH_HAS_DMA
37 #include <mach/dma.h>
38 #endif
39
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41
/* How long to wait for a queued command / controller stop to finish */
#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
/* The controller transfers page data in chunks of at most 2 KiB */
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
/*
 * The low NDCR bits are interrupt *mask* bits: a set bit masks (disables)
 * the corresponding interrupt — see enable_int()/disable_int() below.
 */
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* set on DCSR_BUSERR in the DMA completion irq */
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* NDSR reported an uncorrectable ECC error */
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,	/* NDSR reported correctable ECC error(s) */
};

/* state machine driven by pxa3xx_nand_irq() and handle_data_pio() */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,	/* NFCv1, PXA SoCs */
	PXA3XX_NAND_VARIANT_ARMADA370,	/* NFCv2, Armada 370/XP SoCs */
};
/* Per-chip-select state: one instance per attached NAND chip. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to struct pxa3xx_nand_info */

	int			use_ecc;	/* use HW ECC for this chip? */
	int			cs;		/* chip-select line this host sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
187
/*
 * Controller-wide state, shared by all chip selects.  Command buffers
 * (ndcb0..3) are staged here and pushed to the hardware from the IRQ
 * handler when NDSR_WRCMDREQ fires.
 */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;	/* read offset inside data_buff */
	unsigned int		buf_count;	/* valid bytes in data_buff */
	unsigned int		buf_size;
	unsigned int		data_buff_pos;	/* PIO progress within data area */
	unsigned int		oob_buff_pos;	/* PIO progress within OOB area */

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* one of the STATE_* values above */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;	/* one of the ERR_* values above */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
250
251 static bool use_dma = 1;
252 module_param(use_dma, bool, 0444);
253 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
254
/* NAND timing sets, indexed by the last column of builtin_flash_types */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/*
 * Known flash parts.  Columns appear to be: name, chip id, pages per
 * block, page size, flash width, DFC width, number of blocks, timing
 * set — TODO confirm against struct pxa3xx_nand_flash in
 * <linux/platform_data/mtd-nand-pxa3xx.h>.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
273
/* On-flash bad block table markers: main table and its mirrored copy */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
296
/* OOB layouts for the supported page size / ECC strength combinations */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * NOTE(review): eccbytes is 128 but only 32 positions are listed, and
 * oobfree is empty — presumably the controller consumes the remaining
 * ECC bytes itself; verify against the NFCv2 documentation.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
331
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

/* Pack a (clamped) cycle count into its NDTR0/NDTR1 bit field */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/*
 * convert nano-seconds to nand flash controller clock cycles
 * NOTE(review): integer division truncates, so the result rounds down;
 * confirm whether timings should instead be rounded up to be safe.
 */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
348
/* Device-tree match table; .data carries the controller variant */
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
361
362 static enum pxa3xx_nand_variant
363 pxa3xx_nand_get_variant(struct platform_device *pdev)
364 {
365         const struct of_device_id *of_id =
366                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
367         if (!of_id)
368                 return PXA3XX_NAND_VARIANT_PXA;
369         return (enum pxa3xx_nand_variant)of_id->data;
370 }
371
372 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
373                                    const struct pxa3xx_nand_timing *t)
374 {
375         struct pxa3xx_nand_info *info = host->info_data;
376         unsigned long nand_clk = clk_get_rate(info->clk);
377         uint32_t ndtr0, ndtr1;
378
379         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
380                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
381                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
382                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
383                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
384                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
385
386         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
387                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
388                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
389
390         info->ndtr0cs0 = ndtr0;
391         info->ndtr1cs0 = ndtr1;
392         nand_writel(info, NDTR0CS0, ndtr0);
393         nand_writel(info, NDTR1CS0, ndtr1);
394 }
395
396 /*
397  * Set the data and OOB size, depending on the selected
398  * spare and ECC configuration.
399  * Only applicable to READ0, READOOB and PAGEPROG commands.
400  */
401 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
402                                 struct mtd_info *mtd)
403 {
404         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
405
406         info->data_size = mtd->writesize;
407         if (!oob_enable)
408                 return;
409
410         info->oob_size = info->spare_size;
411         if (!info->use_ecc)
412                 info->oob_size += info->ecc_size;
413 }
414
415 /**
416  * NOTE: it is a must to set ND_RUN firstly, then write
417  * command buffer, otherwise, it does not work.
418  * We enable all the interrupt at the same time, and
419  * let pxa3xx_nand_irq to handle all logic.
420  */
421 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
422 {
423         uint32_t ndcr;
424
425         ndcr = info->reg_ndcr;
426
427         if (info->use_ecc) {
428                 ndcr |= NDCR_ECC_EN;
429                 if (info->ecc_bch)
430                         nand_writel(info, NDECCCTRL, 0x1);
431         } else {
432                 ndcr &= ~NDCR_ECC_EN;
433                 if (info->ecc_bch)
434                         nand_writel(info, NDECCCTRL, 0x0);
435         }
436
437         if (info->use_dma)
438                 ndcr |= NDCR_DMA_EN;
439         else
440                 ndcr &= ~NDCR_DMA_EN;
441
442         if (info->use_spare)
443                 ndcr |= NDCR_SPARE_EN;
444         else
445                 ndcr &= ~NDCR_SPARE_EN;
446
447         ndcr |= NDCR_ND_RUN;
448
449         /* clear status bits and run */
450         nand_writel(info, NDSR, NDSR_MASK);
451         nand_writel(info, NDCR, 0);
452         nand_writel(info, NDCR, ndcr);
453 }
454
/* Wait for the controller to clear ND_RUN, forcing it off on timeout. */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	/*
	 * NOTE(review): NAND_STOP_DELAY is a jiffies count but is used here
	 * as a loop iteration budget with udelay(1) per pass, so the actual
	 * wait depends on HZ — confirm the intended wait is ~40ms.
	 */
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* timed out: force the RUN bit off ourselves */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
474
475 static void __maybe_unused
476 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
477 {
478         uint32_t ndcr;
479
480         ndcr = nand_readl(info, NDCR);
481         nand_writel(info, NDCR, ndcr & ~int_mask);
482 }
483
484 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
485 {
486         uint32_t ndcr;
487
488         ndcr = nand_readl(info, NDCR);
489         nand_writel(info, NDCR, ndcr | int_mask);
490 }
491
/*
 * Read 'len' 32-bit words (not bytes) from the controller data FIFO
 * into 'data'.  With BCH enabled the FIFO must be drained in 32-byte
 * bursts, polling NDSR.RDDREQ between bursts.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* 8 words == 32 bytes consumed */
			data += 32;
			len -= 8;
		}
	}

	/* Non-BCH path, or the final (<= 8 word) burst: no polling needed */
	readsl(info->mmio_base + NDDB, data, len);
}
524
/*
 * Move one chunk of page data (plus OOB, if any) between the driver
 * buffers and the controller FIFO by PIO, then advance the buffer
 * positions for multi-chunk transfers.  Direction is taken from
 * info->state (STATE_PIO_READING / STATE_PIO_WRITING).
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* never transfer more than one controller chunk at a time */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
561
#ifdef ARCH_HAS_DMA
/*
 * Program and start a single-descriptor PXA DMA transfer between the
 * coherent data buffer and the controller FIFO.  Direction is taken
 * from info->state (STATE_DMA_READING / STATE_DMA_WRITING).
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* burst-align the total length (data + OOB) */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* map the request line, point the channel at the descriptor, go */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

/* DMA completion callback: record errors and re-enable controller IRQs. */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* read and acknowledge the channel status */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
#else
/* No PXA DMA engine on this platform: PIO only. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
613
/*
 * Threaded half of the IRQ handler: perform the (possibly slow) PIO
 * transfer outside hard-IRQ context, then acknowledge the data-request
 * status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
625
/*
 * Hard-IRQ handler.  Decodes NDSR, records ECC results, hands data
 * transfers to DMA or to the threaded handler, pushes the staged
 * command buffers to the hardware on WRCMDREQ, and completes waiters
 * on command-done / device-ready.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* ready/command-done status bits differ per chip select */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports a real error count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			/* PIO is done in pxa3xx_nand_irq_thread() */
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
721
/* Return 1 if every byte of 'buf' is 0xff (erased flash), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
729
730 static void set_command_address(struct pxa3xx_nand_info *info,
731                 unsigned int page_size, uint16_t column, int page_addr)
732 {
733         /* small page addr setting */
734         if (page_size < PAGE_CHUNK_SIZE) {
735                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
736                                 | (column & 0xFF);
737
738                 info->ndcb2 = 0;
739         } else {
740                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
741                                 | (column & 0xFFFF);
742
743                 if (page_addr & 0xFF0000)
744                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
745                 else
746                         info->ndcb2 = 0;
747         }
748 }
749
/*
 * Reset per-command bookkeeping and pre-configure ECC/spare usage and
 * transfer sizes for the command about to be issued.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need the data sizes set */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
797
/*
 * Fill the controller command-buffer words (info->ndcb0..ndcb3) for
 * @command and report whether the command must actually be issued.
 *
 * @info:         driver state; info->cs selects the active host
 * @command:      NAND_CMD_* opcode from the NAND core
 * @ext_cmd_type: NDCB0 extended command type used for chunked (splitted)
 *                operations on large pages (see nand_cmdfunc_extended())
 * @column:       column (byte/word offset) within the page
 * @page_addr:    page address
 *
 * Returns 1 when the caller should start the controller, 0 when nothing
 * needs to be sent (small-page SEQIN, ERASE2, blank page program, or an
 * unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* Chip-select is encoded in the first command word. */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/*
	 * SEQIN is not sent on its own for small pages: the PAGEPROG case
	 * below issues a combined SEQIN+PAGEPROG sequence instead. Large
	 * pages re-enable exec_cmd in the SEQIN case.
	 */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		/* OOB reads are plain reads starting past the data area. */
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* ndcb3 carries the overridden transfer length. */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip the program entirely if there is nothing to write. */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small pages: combined SEQIN + PAGEPROG in one go. */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		/*
		 * Read the full INIT_BUFFER_SIZE worth of parameter data
		 * (covers the redundant ONFI/JEDEC parameter page copies).
		 */
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		/* Only one status byte is consumed, but 8 are transferred. */
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* Combined ERASE1 + ERASE2 cycle; row address in ndcb1. */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already issued as part of the ERASE1 double command. */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
972
973 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
974                          int column, int page_addr)
975 {
976         struct pxa3xx_nand_host *host = mtd->priv;
977         struct pxa3xx_nand_info *info = host->info_data;
978         int exec_cmd;
979
980         /*
981          * if this is a x16 device ,then convert the input
982          * "byte" address into a "word" address appropriate
983          * for indexing a word-oriented device
984          */
985         if (info->reg_ndcr & NDCR_DWIDTH_M)
986                 column /= 2;
987
988         /*
989          * There may be different NAND chip hooked to
990          * different chip select, so check whether
991          * chip select has been changed, if yes, reset the timing
992          */
993         if (info->cs != host->cs) {
994                 info->cs = host->cs;
995                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
996                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
997         }
998
999         prepare_start_command(info, command);
1000
1001         info->state = STATE_PREPARED;
1002         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1003
1004         if (exec_cmd) {
1005                 init_completion(&info->cmd_complete);
1006                 init_completion(&info->dev_ready);
1007                 info->need_wait = 1;
1008                 pxa3xx_nand_start(info);
1009
1010                 if (!wait_for_completion_timeout(&info->cmd_complete,
1011                     CHIP_DELAY_TIMEOUT)) {
1012                         dev_err(&info->pdev->dev, "Wait time out!!!\n");
1013                         /* Stop State Machine for next command cycle */
1014                         pxa3xx_nand_stop(info);
1015                 }
1016         }
1017         info->state = STATE_IDLE;
1018 }
1019
/*
 * Extended ->cmdfunc hook for large pages (> PAGE_CHUNK_SIZE): the page
 * is transferred as a sequence of chunked ("splitted") commands, each
 * tagged with an NDCB0 extended command type. Loops issuing commands
 * until the whole page (plus the closing dispatch for program) is done.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd, ext_cmd_type;

        /*
         * if this is a x16 device then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chip hooked to
         * different chip select, so check whether
         * chip select has been changed, if yes, reset the timing
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        /* Select the extended command for the first command */
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_READOOB:
                ext_cmd_type = EXT_CMD_TYPE_MONO;
                break;
        case NAND_CMD_SEQIN:
                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                break;
        case NAND_CMD_PAGEPROG:
                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
                break;
        default:
                ext_cmd_type = 0;
                break;
        }

        prepare_start_command(info, command);

        /*
         * Prepare the "is ready" completion before starting a command
         * transaction sequence. If the command is not executed the
         * completion will be completed, see below.
         *
         * We can do that inside the loop because the command variable
         * is invariant and thus so is the exec_cmd.
         */
        info->need_wait = 1;
        init_completion(&info->dev_ready);
        do {
                info->state = STATE_PREPARED;
                exec_cmd = prepare_set_command(info, command, ext_cmd_type,
                                               column, page_addr);
                /*
                 * Nothing to issue: release any waiter in
                 * pxa3xx_nand_waitfunc() and bail out.
                 */
                if (!exec_cmd) {
                        info->need_wait = 0;
                        complete(&info->dev_ready);
                        break;
                }

                init_completion(&info->cmd_complete);
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait time out!!!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                        break;
                }

                /* Check if the sequence is complete */
                if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
                        break;

                /*
                 * After a splitted program command sequence has issued
                 * the command dispatch, the command sequence is complete.
                 */
                if (info->data_size == 0 &&
                    command == NAND_CMD_PAGEPROG &&
                    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
                        break;

                if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
                        /* Last read: issue a 'last naked read' */
                        if (info->data_size == info->chunk_size)
                                ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
                        else
                                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

                /*
                 * If a splitted program command has no more data to transfer,
                 * the command dispatch must be issued to complete.
                 */
                } else if (command == NAND_CMD_PAGEPROG &&
                           info->data_size == 0) {
                                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                }
        } while (1);

        info->state = STATE_IDLE;
}
1129
1130 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1131                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1132 {
1133         chip->write_buf(mtd, buf, mtd->writesize);
1134         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1135
1136         return 0;
1137 }
1138
1139 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1140                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1141                 int page)
1142 {
1143         struct pxa3xx_nand_host *host = mtd->priv;
1144         struct pxa3xx_nand_info *info = host->info_data;
1145
1146         chip->read_buf(mtd, buf, mtd->writesize);
1147         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1148
1149         if (info->retcode == ERR_CORERR && info->use_ecc) {
1150                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1151
1152         } else if (info->retcode == ERR_UNCORERR) {
1153                 /*
1154                  * for blank page (all 0xff), HW will calculate its ECC as
1155                  * 0, which is different from the ECC information within
1156                  * OOB, ignore such uncorrectable errors
1157                  */
1158                 if (is_buf_blank(buf, mtd->writesize))
1159                         info->retcode = ERR_NONE;
1160                 else
1161                         mtd->ecc_stats.failed++;
1162         }
1163
1164         return info->max_bitflips;
1165 }
1166
1167 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1168 {
1169         struct pxa3xx_nand_host *host = mtd->priv;
1170         struct pxa3xx_nand_info *info = host->info_data;
1171         char retval = 0xFF;
1172
1173         if (info->buf_start < info->buf_count)
1174                 /* Has just send a new command? */
1175                 retval = info->data_buff[info->buf_start++];
1176
1177         return retval;
1178 }
1179
1180 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1181 {
1182         struct pxa3xx_nand_host *host = mtd->priv;
1183         struct pxa3xx_nand_info *info = host->info_data;
1184         u16 retval = 0xFFFF;
1185
1186         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1187                 retval = *((u16 *)(info->data_buff+info->buf_start));
1188                 info->buf_start += 2;
1189         }
1190         return retval;
1191 }
1192
1193 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1194 {
1195         struct pxa3xx_nand_host *host = mtd->priv;
1196         struct pxa3xx_nand_info *info = host->info_data;
1197         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1198
1199         memcpy(buf, info->data_buff + info->buf_start, real_len);
1200         info->buf_start += real_len;
1201 }
1202
1203 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1204                 const uint8_t *buf, int len)
1205 {
1206         struct pxa3xx_nand_host *host = mtd->priv;
1207         struct pxa3xx_nand_info *info = host->info_data;
1208         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1209
1210         memcpy(info->data_buff + info->buf_start, buf, real_len);
1211         info->buf_start += real_len;
1212 }
1213
/*
 * ->select_chip hook: intentionally empty. The driver tracks the active
 * chip select per command instead (see the info->cs handling in
 * nand_cmdfunc()).
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1218
1219 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1220 {
1221         struct pxa3xx_nand_host *host = mtd->priv;
1222         struct pxa3xx_nand_info *info = host->info_data;
1223
1224         if (info->need_wait) {
1225                 info->need_wait = 0;
1226                 if (!wait_for_completion_timeout(&info->dev_ready,
1227                     CHIP_DELAY_TIMEOUT)) {
1228                         dev_err(&info->pdev->dev, "Ready time out!!!\n");
1229                         return NAND_STATUS_FAIL;
1230                 }
1231         }
1232
1233         /* pxa3xx_nand_send_command has waited for command complete */
1234         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1235                 if (info->retcode == ERR_NONE)
1236                         return 0;
1237                 else
1238                         return NAND_STATUS_FAIL;
1239         }
1240
1241         return NAND_STATUS_READY;
1242 }
1243
1244 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1245                                     const struct pxa3xx_nand_flash *f)
1246 {
1247         struct platform_device *pdev = info->pdev;
1248         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1249         struct pxa3xx_nand_host *host = info->host[info->cs];
1250         uint32_t ndcr = 0x0; /* enable all interrupts */
1251
1252         if (f->page_size != 2048 && f->page_size != 512) {
1253                 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1254                 return -EINVAL;
1255         }
1256
1257         if (f->flash_width != 16 && f->flash_width != 8) {
1258                 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1259                 return -EINVAL;
1260         }
1261
1262         /* calculate addressing information */
1263         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1264
1265         if (f->num_blocks * f->page_per_block > 65536)
1266                 host->row_addr_cycles = 3;
1267         else
1268                 host->row_addr_cycles = 2;
1269
1270         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1271         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1272         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1273         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1274         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1275         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1276
1277         ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1278         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1279
1280         info->reg_ndcr = ndcr;
1281
1282         pxa3xx_nand_set_timing(host, f->timing);
1283         return 0;
1284 }
1285
1286 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1287 {
1288         uint32_t ndcr = nand_readl(info, NDCR);
1289
1290         /* Set an initial chunk size */
1291         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1292         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1293         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1294         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1295         return 0;
1296 }
1297
#ifdef ARCH_HAS_DMA
/*
 * Allocate the data buffer, either as a plain kmalloc buffer (PIO) or
 * as a DMA-coherent buffer plus a PXA DMA channel when 'use_dma' is
 * set. info->use_dma is only enabled once everything succeeded.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/* The DMA descriptor is carved out of the tail of the buffer. */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		/* Roll back the coherent allocation on failure. */
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

/* Release whatever pxa3xx_nand_init_buff() set up (DMA or PIO path). */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	if (info->use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
	} else {
		kfree(info->data_buff);
	}
}
#else
/* No platform DMA support: plain kmalloc'ed buffer, PIO only. */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
#endif
1363
1364 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1365 {
1366         struct mtd_info *mtd;
1367         struct nand_chip *chip;
1368         int ret;
1369
1370         mtd = info->host[info->cs]->mtd;
1371         chip = mtd->priv;
1372
1373         /* use the common timing to make a try */
1374         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1375         if (ret)
1376                 return ret;
1377
1378         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1379         ret = chip->waitfunc(mtd, chip);
1380         if (ret & NAND_STATUS_FAIL)
1381                 return -ENODEV;
1382
1383         return 0;
1384 }
1385
/*
 * Map a (strength, step size, page size) requirement onto one of the
 * controller's supported ECC configurations, filling both the driver
 * geometry (chunk/spare/ecc sizes, BCH flag) and the NAND core's
 * nand_ecc_ctrl. Note that for BCH modes the selected per-chunk
 * strength (16) exceeds the requested per-512B strength.
 *
 * Returns 0 on success, -ENODEV when no supported combination matches.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* 1-bit Hamming ECC, 2 KiB pages. */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* 1-bit Hamming ECC, 512-byte pages. */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	/* Same BCH-4 configuration, spread over a 4 KiB page. */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1454
/*
 * Probe and configure the NAND chip behind @mtd: either keep the
 * bootloader's controller configuration (keep_config) or sense the
 * chip, match its ID against the platform/builtin flash tables and
 * program the controller accordingly; then run the NAND core scan,
 * set up ECC, and allocate the real data+OOB buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	/* Trust the existing controller setup when asked to. */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	/* Read the manufacturer/device ID word. */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/*
	 * Search platform-provided flash entries first, then the builtin
	 * table (skipping its index 0 sensing entry).
	 */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/*
	 * Build a one-entry (NULL-terminated) flash ID table for
	 * nand_scan_ident() from the matched geometry.
	 */
	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-specified ECC requirements win over the chip's own. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1608
1609 static int alloc_nand_resource(struct platform_device *pdev)
1610 {
1611         struct pxa3xx_nand_platform_data *pdata;
1612         struct pxa3xx_nand_info *info;
1613         struct pxa3xx_nand_host *host;
1614         struct nand_chip *chip = NULL;
1615         struct mtd_info *mtd;
1616         struct resource *r;
1617         int ret, irq, cs;
1618
1619         pdata = dev_get_platdata(&pdev->dev);
1620         if (pdata->num_cs <= 0)
1621                 return -ENODEV;
1622         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1623                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1624         if (!info)
1625                 return -ENOMEM;
1626
1627         info->pdev = pdev;
1628         info->variant = pxa3xx_nand_get_variant(pdev);
1629         for (cs = 0; cs < pdata->num_cs; cs++) {
1630                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1631                 chip = (struct nand_chip *)(&mtd[1]);
1632                 host = (struct pxa3xx_nand_host *)chip;
1633                 info->host[cs] = host;
1634                 host->mtd = mtd;
1635                 host->cs = cs;
1636                 host->info_data = info;
1637                 mtd->priv = host;
1638                 mtd->owner = THIS_MODULE;
1639
1640                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1641                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1642                 chip->controller        = &info->controller;
1643                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1644                 chip->select_chip       = pxa3xx_nand_select_chip;
1645                 chip->read_word         = pxa3xx_nand_read_word;
1646                 chip->read_byte         = pxa3xx_nand_read_byte;
1647                 chip->read_buf          = pxa3xx_nand_read_buf;
1648                 chip->write_buf         = pxa3xx_nand_write_buf;
1649                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1650                 chip->cmdfunc           = nand_cmdfunc;
1651         }
1652
1653         spin_lock_init(&chip->controller->lock);
1654         init_waitqueue_head(&chip->controller->wq);
1655         info->clk = devm_clk_get(&pdev->dev, NULL);
1656         if (IS_ERR(info->clk)) {
1657                 dev_err(&pdev->dev, "failed to get nand clock\n");
1658                 return PTR_ERR(info->clk);
1659         }
1660         ret = clk_prepare_enable(info->clk);
1661         if (ret < 0)
1662                 return ret;
1663
1664         if (use_dma) {
1665                 /*
1666                  * This is a dirty hack to make this driver work from
1667                  * devicetree bindings. It can be removed once we have
1668                  * a prober DMA controller framework for DT.
1669                  */
1670                 if (pdev->dev.of_node &&
1671                     of_machine_is_compatible("marvell,pxa3xx")) {
1672                         info->drcmr_dat = 97;
1673                         info->drcmr_cmd = 99;
1674                 } else {
1675                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1676                         if (r == NULL) {
1677                                 dev_err(&pdev->dev,
1678                                         "no resource defined for data DMA\n");
1679                                 ret = -ENXIO;
1680                                 goto fail_disable_clk;
1681                         }
1682                         info->drcmr_dat = r->start;
1683
1684                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1685                         if (r == NULL) {
1686                                 dev_err(&pdev->dev,
1687                                         "no resource defined for cmd DMA\n");
1688                                 ret = -ENXIO;
1689                                 goto fail_disable_clk;
1690                         }
1691                         info->drcmr_cmd = r->start;
1692                 }
1693         }
1694
1695         irq = platform_get_irq(pdev, 0);
1696         if (irq < 0) {
1697                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1698                 ret = -ENXIO;
1699                 goto fail_disable_clk;
1700         }
1701
1702         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1703         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1704         if (IS_ERR(info->mmio_base)) {
1705                 ret = PTR_ERR(info->mmio_base);
1706                 goto fail_disable_clk;
1707         }
1708         info->mmio_phys = r->start;
1709
1710         /* Allocate a buffer to allow flash detection */
1711         info->buf_size = INIT_BUFFER_SIZE;
1712         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1713         if (info->data_buff == NULL) {
1714                 ret = -ENOMEM;
1715                 goto fail_disable_clk;
1716         }
1717
1718         /* initialize all interrupts to be disabled */
1719         disable_int(info, NDSR_MASK);
1720
1721         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1722                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1723                                    pdev->name, info);
1724         if (ret < 0) {
1725                 dev_err(&pdev->dev, "failed to request IRQ\n");
1726                 goto fail_free_buf;
1727         }
1728
1729         platform_set_drvdata(pdev, info);
1730
1731         return 0;
1732
1733 fail_free_buf:
1734         free_irq(irq, info);
1735         kfree(info->data_buff);
1736 fail_disable_clk:
1737         clk_disable_unprepare(info->clk);
1738         return ret;
1739 }
1740
1741 static int pxa3xx_nand_remove(struct platform_device *pdev)
1742 {
1743         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1744         struct pxa3xx_nand_platform_data *pdata;
1745         int irq, cs;
1746
1747         if (!info)
1748                 return 0;
1749
1750         pdata = dev_get_platdata(&pdev->dev);
1751
1752         irq = platform_get_irq(pdev, 0);
1753         if (irq >= 0)
1754                 free_irq(irq, info);
1755         pxa3xx_nand_free_buff(info);
1756
1757         clk_disable_unprepare(info->clk);
1758
1759         for (cs = 0; cs < pdata->num_cs; cs++)
1760                 nand_release(info->host[cs]->mtd);
1761         return 0;
1762 }
1763
1764 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1765 {
1766         struct pxa3xx_nand_platform_data *pdata;
1767         struct device_node *np = pdev->dev.of_node;
1768         const struct of_device_id *of_id =
1769                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1770
1771         if (!of_id)
1772                 return 0;
1773
1774         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1775         if (!pdata)
1776                 return -ENOMEM;
1777
1778         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1779                 pdata->enable_arbiter = 1;
1780         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1781                 pdata->keep_config = 1;
1782         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1783         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1784
1785         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1786         if (pdata->ecc_strength < 0)
1787                 pdata->ecc_strength = 0;
1788
1789         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1790         if (pdata->ecc_step_size < 0)
1791                 pdata->ecc_step_size = 0;
1792
1793         pdev->dev.platform_data = pdata;
1794
1795         return 0;
1796 }
1797
1798 static int pxa3xx_nand_probe(struct platform_device *pdev)
1799 {
1800         struct pxa3xx_nand_platform_data *pdata;
1801         struct mtd_part_parser_data ppdata = {};
1802         struct pxa3xx_nand_info *info;
1803         int ret, cs, probe_success;
1804
1805 #ifndef ARCH_HAS_DMA
1806         if (use_dma) {
1807                 use_dma = 0;
1808                 dev_warn(&pdev->dev,
1809                          "This platform can't do DMA on this device\n");
1810         }
1811 #endif
1812         ret = pxa3xx_nand_probe_dt(pdev);
1813         if (ret)
1814                 return ret;
1815
1816         pdata = dev_get_platdata(&pdev->dev);
1817         if (!pdata) {
1818                 dev_err(&pdev->dev, "no platform data defined\n");
1819                 return -ENODEV;
1820         }
1821
1822         ret = alloc_nand_resource(pdev);
1823         if (ret) {
1824                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1825                 return ret;
1826         }
1827
1828         info = platform_get_drvdata(pdev);
1829         probe_success = 0;
1830         for (cs = 0; cs < pdata->num_cs; cs++) {
1831                 struct mtd_info *mtd = info->host[cs]->mtd;
1832
1833                 /*
1834                  * The mtd name matches the one used in 'mtdparts' kernel
1835                  * parameter. This name cannot be changed or otherwise
1836                  * user's mtd partitions configuration would get broken.
1837                  */
1838                 mtd->name = "pxa3xx_nand-0";
1839                 info->cs = cs;
1840                 ret = pxa3xx_nand_scan(mtd);
1841                 if (ret) {
1842                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1843                                 cs);
1844                         continue;
1845                 }
1846
1847                 ppdata.of_node = pdev->dev.of_node;
1848                 ret = mtd_device_parse_register(mtd, NULL,
1849                                                 &ppdata, pdata->parts[cs],
1850                                                 pdata->nr_parts[cs]);
1851                 if (!ret)
1852                         probe_success = 1;
1853         }
1854
1855         if (!probe_success) {
1856                 pxa3xx_nand_remove(pdev);
1857                 return -ENODEV;
1858         }
1859
1860         return 0;
1861 }
1862
1863 #ifdef CONFIG_PM
1864 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1865 {
1866         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1867         struct pxa3xx_nand_platform_data *pdata;
1868         struct mtd_info *mtd;
1869         int cs;
1870
1871         pdata = dev_get_platdata(&pdev->dev);
1872         if (info->state) {
1873                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1874                 return -EAGAIN;
1875         }
1876
1877         for (cs = 0; cs < pdata->num_cs; cs++) {
1878                 mtd = info->host[cs]->mtd;
1879                 mtd_suspend(mtd);
1880         }
1881
1882         return 0;
1883 }
1884
1885 static int pxa3xx_nand_resume(struct platform_device *pdev)
1886 {
1887         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1888         struct pxa3xx_nand_platform_data *pdata;
1889         struct mtd_info *mtd;
1890         int cs;
1891
1892         pdata = dev_get_platdata(&pdev->dev);
1893         /* We don't want to handle interrupt without calling mtd routine */
1894         disable_int(info, NDCR_INT_MASK);
1895
1896         /*
1897          * Directly set the chip select to a invalid value,
1898          * then the driver would reset the timing according
1899          * to current chip select at the beginning of cmdfunc
1900          */
1901         info->cs = 0xff;
1902
1903         /*
1904          * As the spec says, the NDSR would be updated to 0x1800 when
1905          * doing the nand_clk disable/enable.
1906          * To prevent it damaging state machine of the driver, clear
1907          * all status before resume
1908          */
1909         nand_writel(info, NDSR, NDSR_MASK);
1910         for (cs = 0; cs < pdata->num_cs; cs++) {
1911                 mtd = info->host[cs]->mtd;
1912                 mtd_resume(mtd);
1913         }
1914
1915         return 0;
1916 }
1917 #else
1918 #define pxa3xx_nand_suspend     NULL
1919 #define pxa3xx_nand_resume      NULL
1920 #endif
1921
/*
 * Platform driver glue.  The legacy .suspend/.resume hooks resolve to
 * NULL when CONFIG_PM is not set (see the #else branch above).
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

/* Registers the driver at module init and unregisters it at module exit. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");