Merge remote-tracking branches 'spi/topic/qup', 'spi/topic/rockchip', 'spi/topic...
[cascardo/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include <linux/of_mtd.h>
30
31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32 #define ARCH_HAS_DMA
33 #endif
34
35 #ifdef ARCH_HAS_DMA
36 #include <mach/dma.h>
37 #endif
38
39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40
/* Max time (in jiffies) to wait for a command to complete. */
#define CHIP_DELAY_TIMEOUT	(2 * HZ/10)
/* Max time (in jiffies) to wait for the ND_RUN bit to clear on stop. */
#define NAND_STOP_DELAY		(2 * HZ/50)
/* The controller handles large pages in chunks of this many bytes. */
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
#define INIT_BUFFER_SIZE	256
51
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

/* NDCR (control register) bits */
#define NDCR_SPARE_EN		(0x1 << 31) /* spare (OOB) area transfer enable */
#define NDCR_ECC_EN		(0x1 << 30) /* hardware ECC enable */
#define NDCR_DMA_EN		(0x1 << 29) /* DMA request enable */
#define NDCR_ND_RUN		(0x1 << 28) /* start controller operation */
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
/*
 * Low 12 NDCR bits mask (i.e. disable) the corresponding interrupt
 * sources; see enable_int()/disable_int() for the inverted sense.
 */
#define NDCR_INT_MASK		(0xFFF)

/* NDSR (status register) bits; the low 12 bits are acked by writing 1 */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)  /* CS0 command done */
#define NDSR_CS1_CMDD		(0x1 << 7)  /* CS1 command done */
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)  /* uncorrectable ECC error */
#define NDSR_CORERR		(0x1 << 3)  /* correctable ECC error */
#define NDSR_WRDREQ		(0x1 << 2)  /* write data request */
#define NDSR_RDDREQ		(0x1 << 1)  /* read data request */
#define NDSR_WRCMDREQ		(0x1)       /* write command request */

/* NDCB0 (command buffer 0) bits */
#define NDCB0_LEN_OVRD		(0x1 << 28) /* take transfer length from NDCB3 */
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24) /* address chip-select 1 instead of 0 */
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19) /* double byte command (CMD2 field valid) */
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types used for chunked (multi-command) page access */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
127
/*
 * macros for registers read/write
 * The _relaxed accessors impose no memory barriers; callers are
 * responsible for any ordering they need around these accesses.
 */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))
134
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error (DCSR_BUSERR) */
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,	/* correctable ECC error (NDSR_CORERR) */
};

/* Driver state machine; transitions are driven mostly by the IRQ handler. */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,	/* writing the NDCBx command buffers */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,		/* command-done interrupt received */
	STATE_READY,		/* device-ready interrupt received */
};

/*
 * Controller generations: NFCv1 (PXA SoCs) and NFCv2 (Armada 370/XP),
 * the latter adding NDCB3 and per-chunk BCH error counters.
 */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
162
/* Per-chip-select state; one instance per attached NAND chip. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to struct pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this chip is wired to */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;	/* column address cycles per command */
	unsigned int		row_addr_cycles;	/* row (page) address cycles per command */
	size_t			read_id_bytes;		/* ID bytes to fetch on READID */

};
178
/* Per-controller state, shared by all chip selects. */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* controller register window */
	unsigned long		mmio_phys;	/* physical base, used for DMA to NDDB */
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;	/* current read offset into data_buff */
	unsigned int		buf_count;	/* valid bytes in data_buff */
	unsigned int		buf_size;
	unsigned int		data_buff_pos;	/* PIO/DMA progress within the data area */
	unsigned int		oob_buff_pos;	/* PIO/DMA progress within the OOB area */

	/* DMA information */
	int			drcmr_dat;	/* DMA request line for data */
	int			drcmr_cmd;	/* DMA request line for commands */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* current STATE_* value */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips corrected in the last chunk */
	unsigned int		max_bitflips;	/* max per-chunk bitflips for this page */
	int			retcode;	/* ERR_* result of the last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
241
242 static bool use_dma = 1;
243 module_param(use_dma, bool, 0444);
244 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
245
/*
 * Built-in timing sets referenced by builtin_flash_types below.
 * Values are in ns and are converted to controller clock cycles by
 * pxa3xx_nand_set_timing().
 * NOTE(review): positional initializers — field order assumed to match
 * struct pxa3xx_nand_timing (tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR,
 * tAR); confirm against linux/platform_data/mtd-nand-pxa3xx.h.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/*
 * Known flash devices, matched by chip ID.  Entry 0 is the generic
 * default used during detection (see DEFAULT_FLASH_TYPE).
 * NOTE(review): column meaning assumed from struct pxa3xx_nand_flash
 * (name, chip_id, page_per_block, page_size, flash_width, dfc_width,
 * num_blocks, timing); confirm against the platform header.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
264
/* On-flash bad block table signatures: "MVBbt0" and its mirror "1tbBVM". */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: stored in the last blocks of the chip, versioned, 2 bits/block. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,	/* pattern offset within the OOB area */
	.len = 6,
	.veroffs = 14,	/* version byte follows the 6-byte pattern */
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: identical layout, distinguished only by the pattern. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
287
/* BCH-4bit layout for 2KiB pages: ECC in the upper half of the 64-byte OOB. */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* BCH-4bit layout for 4KiB pages: two 32-byte ECC regions, one per chunk. */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * BCH-8bit layout for 4KiB pages.
 * NOTE(review): eccbytes is 128 but only 32 eccpos entries are listed
 * and no oobfree regions are declared — looks intentional (all spare
 * consumed by ECC) but worth confirming against the controller docs.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
322
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

/* NDTR0/NDTR1 field encoders; each value is clamped to its field width. */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/*
 * convert nano-seconds to nand flash controller clock cycles
 * clk is in Hz; dividing by 1000000 first keeps the multiplication from
 * overflowing.  Integer division truncates, so the result can round a
 * cycle count down.
 */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
339
/* DT match table; .data carries the controller variant for each SoC. */
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
352
353 static enum pxa3xx_nand_variant
354 pxa3xx_nand_get_variant(struct platform_device *pdev)
355 {
356         const struct of_device_id *of_id =
357                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
358         if (!of_id)
359                 return PXA3XX_NAND_VARIANT_PXA;
360         return (enum pxa3xx_nand_variant)of_id->data;
361 }
362
/*
 * Program the CS0 timing registers from a timing set given in ns,
 * converting each value to controller clock cycles.  The computed
 * values are also cached in the driver state for later reuse.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* Cache before writing so the stored state always matches HW. */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
386
387 /*
388  * Set the data and OOB size, depending on the selected
389  * spare and ECC configuration.
390  * Only applicable to READ0, READOOB and PAGEPROG commands.
391  */
392 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
393                                 struct mtd_info *mtd)
394 {
395         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
396
397         info->data_size = mtd->writesize;
398         if (!oob_enable)
399                 return;
400
401         info->oob_size = info->spare_size;
402         if (!info->use_ecc)
403                 info->oob_size += info->ecc_size;
404 }
405
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	/* Apply the per-command ECC choice, including the BCH sub-mode. */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);	/* write-1-to-clear all status */
	nand_writel(info, NDCR, ndcr);
}
445
/*
 * Stop the controller: poll for ND_RUN to self-clear, forcing it clear
 * if it does not within NAND_STOP_DELAY iterations, then ack all
 * pending status bits.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* Polling expired: force the controller out of the RUN state. */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
465
/*
 * The low NDCR bits are interrupt *mask* bits: a set bit disables the
 * source.  So enabling interrupts means clearing bits, and vice versa.
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);	/* clear mask = enable */
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);	/* set mask = disable */
}
482
/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into
 * 'data'.  With BCH enabled the hardware requires RDDREQ to be polled
 * between 32-byte bursts; without BCH a single burst read suffices.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		int timeout;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			__raw_readsl(info->mmio_base + NDDB, data, 8);

			/* Wait (up to ~5ms) for the FIFO to refill. */
			for (timeout = 0;
			     !(nand_readl(info, NDSR) & NDSR_RDDREQ);
			     timeout++) {
				if (timeout >= 5) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}

				mdelay(1);
			}

			/* 8 words consumed: advance 32 bytes, 8 words. */
			data += 32;
			len -= 8;
		}
	}

	__raw_readsl(info->mmio_base + NDDB, data, len);
}
518
/*
 * PIO data path: move one chunk (at most chunk_size bytes) plus its OOB
 * between the buffers and the controller FIFO, in the direction given
 * by the current state, then advance the buffer positions.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* Large pages are transferred one chunk at a time. */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB,
			      info->data_buff + info->data_buff_pos,
			      DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB,
				      info->oob_buff + info->oob_buff_pos,
				      DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
555
#ifdef ARCH_HAS_DMA
/*
 * Kick off a single-descriptor PXA DMA transfer between the data buffer
 * and the controller FIFO (NDDB); direction comes from info->state.
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* Transfers are done in 32-byte bursts, so round the length up. */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> FIFO */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* FIFO -> memory */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Map the request line to the channel, load the descriptor, run. */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

/*
 * DMA completion callback: latch any bus error, re-enable controller
 * interrupts and ack the data-request bits so the command can finish.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;	/* write back to clear the channel status */

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
#else
/* No DMA support on this platform: the PIO path is used instead. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
607
/*
 * Controller interrupt handler.  Drives the whole command state
 * machine: records ECC results, moves data (PIO directly, or by
 * starting a DMA transfer), writes the command buffers on WRCMDREQ,
 * and completes the cmd_complete / dev_ready waiters.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;

	/* Ready/command-done bits differ per chip select. */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only NFCv2 with BCH reports a real corrected-bit count. */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			/* NDSR is left for the DMA callback to ack. */
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		/* Ack WRCMDREQ before loading NDCB0, and drop it from the
		 * final status write below. */
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}
697
/*
 * Check whether a buffer contains only 0xff bytes (i.e. erased flash).
 * Returns 1 when every byte is 0xff (including len == 0), 0 otherwise.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
705
706 static void set_command_address(struct pxa3xx_nand_info *info,
707                 unsigned int page_size, uint16_t column, int page_addr)
708 {
709         /* small page addr setting */
710         if (page_size < PAGE_CHUNK_SIZE) {
711                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
712                                 | (column & 0xFF);
713
714                 info->ndcb2 = 0;
715         } else {
716                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
717                                 | (column & 0xFFFF);
718
719                 if (page_addr & 0xFF0000)
720                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
721                 else
722                         info->ndcb2 = 0;
723         }
724 }
725
/*
 * Reset the per-command bookkeeping and derive the transfer sizes
 * before a new command is issued to the controller.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through - READ0/PAGEPROG also need the data sizes */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* PARAM data is raw; don't transfer the spare area. */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
773
774 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
775                 int ext_cmd_type, uint16_t column, int page_addr)
776 {
777         int addr_cycle, exec_cmd;
778         struct pxa3xx_nand_host *host;
779         struct mtd_info *mtd;
780
781         host = info->host[info->cs];
782         mtd = host->mtd;
783         addr_cycle = 0;
784         exec_cmd = 1;
785
786         if (info->cs != 0)
787                 info->ndcb0 = NDCB0_CSEL;
788         else
789                 info->ndcb0 = 0;
790
791         if (command == NAND_CMD_SEQIN)
792                 exec_cmd = 0;
793
794         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
795                                     + host->col_addr_cycles);
796
797         switch (command) {
798         case NAND_CMD_READOOB:
799         case NAND_CMD_READ0:
800                 info->buf_start = column;
801                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
802                                 | addr_cycle
803                                 | NAND_CMD_READ0;
804
805                 if (command == NAND_CMD_READOOB)
806                         info->buf_start += mtd->writesize;
807
808                 /*
809                  * Multiple page read needs an 'extended command type' field,
810                  * which is either naked-read or last-read according to the
811                  * state.
812                  */
813                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
814                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
815                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
816                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
817                                         | NDCB0_LEN_OVRD
818                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
819                         info->ndcb3 = info->chunk_size +
820                                       info->oob_size;
821                 }
822
823                 set_command_address(info, mtd->writesize, column, page_addr);
824                 break;
825
826         case NAND_CMD_SEQIN:
827
828                 info->buf_start = column;
829                 set_command_address(info, mtd->writesize, 0, page_addr);
830
831                 /*
832                  * Multiple page programming needs to execute the initial
833                  * SEQIN command that sets the page address.
834                  */
835                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
836                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
837                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
838                                 | addr_cycle
839                                 | command;
840                         /* No data transfer in this case */
841                         info->data_size = 0;
842                         exec_cmd = 1;
843                 }
844                 break;
845
846         case NAND_CMD_PAGEPROG:
847                 if (is_buf_blank(info->data_buff,
848                                         (mtd->writesize + mtd->oobsize))) {
849                         exec_cmd = 0;
850                         break;
851                 }
852
853                 /* Second command setting for large pages */
854                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
855                         /*
856                          * Multiple page write uses the 'extended command'
857                          * field. This can be used to issue a command dispatch
858                          * or a naked-write depending on the current stage.
859                          */
860                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
861                                         | NDCB0_LEN_OVRD
862                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
863                         info->ndcb3 = info->chunk_size +
864                                       info->oob_size;
865
866                         /*
867                          * This is the command dispatch that completes a chunked
868                          * page program operation.
869                          */
870                         if (info->data_size == 0) {
871                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
872                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
873                                         | command;
874                                 info->ndcb1 = 0;
875                                 info->ndcb2 = 0;
876                                 info->ndcb3 = 0;
877                         }
878                 } else {
879                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
880                                         | NDCB0_AUTO_RS
881                                         | NDCB0_ST_ROW_EN
882                                         | NDCB0_DBC
883                                         | (NAND_CMD_PAGEPROG << 8)
884                                         | NAND_CMD_SEQIN
885                                         | addr_cycle;
886                 }
887                 break;
888
889         case NAND_CMD_PARAM:
890                 info->buf_count = 256;
891                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
892                                 | NDCB0_ADDR_CYC(1)
893                                 | NDCB0_LEN_OVRD
894                                 | command;
895                 info->ndcb1 = (column & 0xFF);
896                 info->ndcb3 = 256;
897                 info->data_size = 256;
898                 break;
899
900         case NAND_CMD_READID:
901                 info->buf_count = host->read_id_bytes;
902                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
903                                 | NDCB0_ADDR_CYC(1)
904                                 | command;
905                 info->ndcb1 = (column & 0xFF);
906
907                 info->data_size = 8;
908                 break;
909         case NAND_CMD_STATUS:
910                 info->buf_count = 1;
911                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
912                                 | NDCB0_ADDR_CYC(1)
913                                 | command;
914
915                 info->data_size = 8;
916                 break;
917
918         case NAND_CMD_ERASE1:
919                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
920                                 | NDCB0_AUTO_RS
921                                 | NDCB0_ADDR_CYC(3)
922                                 | NDCB0_DBC
923                                 | (NAND_CMD_ERASE2 << 8)
924                                 | NAND_CMD_ERASE1;
925                 info->ndcb1 = page_addr;
926                 info->ndcb2 = 0;
927
928                 break;
929         case NAND_CMD_RESET:
930                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
931                                 | command;
932
933                 break;
934
935         case NAND_CMD_ERASE2:
936                 exec_cmd = 0;
937                 break;
938
939         default:
940                 exec_cmd = 0;
941                 dev_err(&info->pdev->dev, "non-supported command %x\n",
942                                 command);
943                 break;
944         }
945
946         return exec_cmd;
947 }
948
/*
 * Default MTD cmdfunc: issue one controller command sequence per MTD
 * command. Used when the page fits in the controller FIFO; larger pages
 * switch to nand_cmdfunc_extended() in pxa3xx_nand_scan().
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	/* exec_cmd == 0 means nothing has to be issued to the controller */
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		/*
		 * Arm both completions before starting: cmd_complete is
		 * waited on here, dev_ready later in pxa3xx_nand_waitfunc().
		 */
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
996
/*
 * cmdfunc for pages larger than the controller FIFO (selected in
 * pxa3xx_nand_scan()): a single MTD command is executed as a loop of
 * chunked "extended" controller commands, adjusting ext_cmd_type as the
 * transfer progresses until the whole page has been moved.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		/*
		 * Nothing to issue for this command: release any waiter
		 * in pxa3xx_nand_waitfunc() and end the sequence.
		 */
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1107
1108 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1109                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1110 {
1111         chip->write_buf(mtd, buf, mtd->writesize);
1112         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1113
1114         return 0;
1115 }
1116
1117 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1118                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1119                 int page)
1120 {
1121         struct pxa3xx_nand_host *host = mtd->priv;
1122         struct pxa3xx_nand_info *info = host->info_data;
1123
1124         chip->read_buf(mtd, buf, mtd->writesize);
1125         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1126
1127         if (info->retcode == ERR_CORERR && info->use_ecc) {
1128                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1129
1130         } else if (info->retcode == ERR_UNCORERR) {
1131                 /*
1132                  * for blank page (all 0xff), HW will calculate its ECC as
1133                  * 0, which is different from the ECC information within
1134                  * OOB, ignore such uncorrectable errors
1135                  */
1136                 if (is_buf_blank(buf, mtd->writesize))
1137                         info->retcode = ERR_NONE;
1138                 else
1139                         mtd->ecc_stats.failed++;
1140         }
1141
1142         return info->max_bitflips;
1143 }
1144
1145 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1146 {
1147         struct pxa3xx_nand_host *host = mtd->priv;
1148         struct pxa3xx_nand_info *info = host->info_data;
1149         char retval = 0xFF;
1150
1151         if (info->buf_start < info->buf_count)
1152                 /* Has just send a new command? */
1153                 retval = info->data_buff[info->buf_start++];
1154
1155         return retval;
1156 }
1157
1158 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1159 {
1160         struct pxa3xx_nand_host *host = mtd->priv;
1161         struct pxa3xx_nand_info *info = host->info_data;
1162         u16 retval = 0xFFFF;
1163
1164         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1165                 retval = *((u16 *)(info->data_buff+info->buf_start));
1166                 info->buf_start += 2;
1167         }
1168         return retval;
1169 }
1170
1171 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1172 {
1173         struct pxa3xx_nand_host *host = mtd->priv;
1174         struct pxa3xx_nand_info *info = host->info_data;
1175         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1176
1177         memcpy(buf, info->data_buff + info->buf_start, real_len);
1178         info->buf_start += real_len;
1179 }
1180
1181 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1182                 const uint8_t *buf, int len)
1183 {
1184         struct pxa3xx_nand_host *host = mtd->priv;
1185         struct pxa3xx_nand_info *info = host->info_data;
1186         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1187
1188         memcpy(info->data_buff + info->buf_start, buf, real_len);
1189         info->buf_start += real_len;
1190 }
1191
/*
 * Chip selection is encoded per-command (NDCB0_CSEL) when the command
 * is prepared, so there is nothing to do here.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1196
/*
 * MTD waitfunc: if the cmdfunc armed the device-ready completion
 * (need_wait), wait for it, then translate the driver's retcode into a
 * NAND status for program/erase operations.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret;

	if (info->need_wait) {
		ret = wait_for_completion_timeout(&info->dev_ready,
				CHIP_DELAY_TIMEOUT);
		/* One wait per armed completion, even on timeout */
		info->need_wait = 0;
		if (!ret) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1223
/*
 * Translate a static flash description into driver/controller state:
 * READID length, address cycle counts, the cached NDCR register image
 * (info->reg_ndcr) and the timing registers. Only 512/2048-byte pages
 * and 8/16-bit widths are accepted.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512) {
		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
		return -EINVAL;
	}

	if (f->flash_width != 16 && f->flash_width != 8) {
		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
		return -EINVAL;
	}

	/* calculate flash information */
	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	/* more than 64K pages needs a third row address cycle */
	if (f->num_blocks * f->page_per_block > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	/* Assemble the NDCR image cached in info->reg_ndcr */
	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(host, f->timing);
	return 0;
}
1268
1269 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1270 {
1271         /*
1272          * We set 0 by hard coding here, for we don't support keep_config
1273          * when there is more than one chip attached to the controller
1274          */
1275         struct pxa3xx_nand_host *host = info->host[0];
1276         uint32_t ndcr = nand_readl(info, NDCR);
1277
1278         if (ndcr & NDCR_PAGE_SZ) {
1279                 /* Controller's FIFO size */
1280                 info->chunk_size = 2048;
1281                 host->read_id_bytes = 4;
1282         } else {
1283                 info->chunk_size = 512;
1284                 host->read_id_bytes = 2;
1285         }
1286
1287         /* Set an initial chunk size */
1288         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1289         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1290         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1291         return 0;
1292 }
1293
1294 #ifdef ARCH_HAS_DMA
/*
 * Allocate the data buffer. With the file-scope 'use_dma' flag cleared
 * this is a plain kmalloc; otherwise a coherent DMA buffer (with the
 * DMA descriptor embedded at its tail) and a DMA channel are set up,
 * and info->use_dma is switched on only once everything succeeded.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/* The pxa_dma_desc is carved out of the end of the buffer */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	/* Virtual and bus addresses of the embedded descriptor */
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		/* Roll back the coherent allocation before bailing out */
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1333
1334 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1335 {
1336         struct platform_device *pdev = info->pdev;
1337         if (info->use_dma) {
1338                 pxa_free_dma(info->data_dma_ch);
1339                 dma_free_coherent(&pdev->dev, info->buf_size,
1340                                   info->data_buff, info->data_buff_phys);
1341         } else {
1342                 kfree(info->data_buff);
1343         }
1344 }
1345 #else
1346 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1347 {
1348         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1349         if (info->data_buff == NULL)
1350                 return -ENOMEM;
1351         return 0;
1352 }
1353
1354 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1355 {
1356         kfree(info->data_buff);
1357 }
1358 #endif
1359
1360 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1361 {
1362         struct mtd_info *mtd;
1363         struct nand_chip *chip;
1364         int ret;
1365
1366         mtd = info->host[info->cs]->mtd;
1367         chip = mtd->priv;
1368
1369         /* use the common timing to make a try */
1370         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1371         if (ret)
1372                 return ret;
1373
1374         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1375         ret = chip->waitfunc(mtd, chip);
1376         if (ret & NAND_STATUS_FAIL)
1377                 return -ENODEV;
1378
1379         return 0;
1380 }
1381
/*
 * Map a requested (strength, step size) ECC requirement at a given page
 * size onto one of the controller's supported configurations, filling
 * in both the driver geometry (chunk/spare/ECC byte counts, BCH flag)
 * and the generic nand_ecc_ctrl. Returns -ENODEV when no supported
 * configuration matches.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* 1-bit correction per 512 bytes, 2048-byte pages */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* 1-bit correction per 512 bytes, 512-byte pages */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	/* Same 4-bit requirement, 4096-byte pages */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1450
/*
 * Probe and configure the chip behind this mtd: identify the flash
 * (unless platform data asks to keep the bootloader configuration),
 * program timings and NDCR, pick the cmdfunc variant, set up ECC and
 * allocate the final data + OOB buffer, then finish with
 * nand_scan_tail().
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	/* Reuse the configuration left by the bootloader, if requested */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/* Search the platform-provided flash entries, then the built-ins */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry flash ID table for nand_scan_ident() below */
	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Explicit platform ECC settings override the chip's own values */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* more than 64K pages needs a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1599
1600 static int alloc_nand_resource(struct platform_device *pdev)
1601 {
1602         struct pxa3xx_nand_platform_data *pdata;
1603         struct pxa3xx_nand_info *info;
1604         struct pxa3xx_nand_host *host;
1605         struct nand_chip *chip = NULL;
1606         struct mtd_info *mtd;
1607         struct resource *r;
1608         int ret, irq, cs;
1609
1610         pdata = dev_get_platdata(&pdev->dev);
1611         if (pdata->num_cs <= 0)
1612                 return -ENODEV;
1613         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1614                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1615         if (!info)
1616                 return -ENOMEM;
1617
1618         info->pdev = pdev;
1619         info->variant = pxa3xx_nand_get_variant(pdev);
1620         for (cs = 0; cs < pdata->num_cs; cs++) {
1621                 mtd = (struct mtd_info *)((unsigned int)&info[1] +
1622                       (sizeof(*mtd) + sizeof(*host)) * cs);
1623                 chip = (struct nand_chip *)(&mtd[1]);
1624                 host = (struct pxa3xx_nand_host *)chip;
1625                 info->host[cs] = host;
1626                 host->mtd = mtd;
1627                 host->cs = cs;
1628                 host->info_data = info;
1629                 mtd->priv = host;
1630                 mtd->owner = THIS_MODULE;
1631
1632                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1633                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1634                 chip->controller        = &info->controller;
1635                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1636                 chip->select_chip       = pxa3xx_nand_select_chip;
1637                 chip->read_word         = pxa3xx_nand_read_word;
1638                 chip->read_byte         = pxa3xx_nand_read_byte;
1639                 chip->read_buf          = pxa3xx_nand_read_buf;
1640                 chip->write_buf         = pxa3xx_nand_write_buf;
1641                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1642                 chip->cmdfunc           = nand_cmdfunc;
1643         }
1644
1645         spin_lock_init(&chip->controller->lock);
1646         init_waitqueue_head(&chip->controller->wq);
1647         info->clk = devm_clk_get(&pdev->dev, NULL);
1648         if (IS_ERR(info->clk)) {
1649                 dev_err(&pdev->dev, "failed to get nand clock\n");
1650                 return PTR_ERR(info->clk);
1651         }
1652         ret = clk_prepare_enable(info->clk);
1653         if (ret < 0)
1654                 return ret;
1655
1656         if (use_dma) {
1657                 /*
1658                  * This is a dirty hack to make this driver work from
1659                  * devicetree bindings. It can be removed once we have
1660                  * a prober DMA controller framework for DT.
1661                  */
1662                 if (pdev->dev.of_node &&
1663                     of_machine_is_compatible("marvell,pxa3xx")) {
1664                         info->drcmr_dat = 97;
1665                         info->drcmr_cmd = 99;
1666                 } else {
1667                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1668                         if (r == NULL) {
1669                                 dev_err(&pdev->dev,
1670                                         "no resource defined for data DMA\n");
1671                                 ret = -ENXIO;
1672                                 goto fail_disable_clk;
1673                         }
1674                         info->drcmr_dat = r->start;
1675
1676                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1677                         if (r == NULL) {
1678                                 dev_err(&pdev->dev,
1679                                         "no resource defined for cmd DMA\n");
1680                                 ret = -ENXIO;
1681                                 goto fail_disable_clk;
1682                         }
1683                         info->drcmr_cmd = r->start;
1684                 }
1685         }
1686
1687         irq = platform_get_irq(pdev, 0);
1688         if (irq < 0) {
1689                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1690                 ret = -ENXIO;
1691                 goto fail_disable_clk;
1692         }
1693
1694         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1695         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1696         if (IS_ERR(info->mmio_base)) {
1697                 ret = PTR_ERR(info->mmio_base);
1698                 goto fail_disable_clk;
1699         }
1700         info->mmio_phys = r->start;
1701
1702         /* Allocate a buffer to allow flash detection */
1703         info->buf_size = INIT_BUFFER_SIZE;
1704         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1705         if (info->data_buff == NULL) {
1706                 ret = -ENOMEM;
1707                 goto fail_disable_clk;
1708         }
1709
1710         /* initialize all interrupts to be disabled */
1711         disable_int(info, NDSR_MASK);
1712
1713         ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1714         if (ret < 0) {
1715                 dev_err(&pdev->dev, "failed to request IRQ\n");
1716                 goto fail_free_buf;
1717         }
1718
1719         platform_set_drvdata(pdev, info);
1720
1721         return 0;
1722
1723 fail_free_buf:
1724         free_irq(irq, info);
1725         kfree(info->data_buff);
1726 fail_disable_clk:
1727         clk_disable_unprepare(info->clk);
1728         return ret;
1729 }
1730
1731 static int pxa3xx_nand_remove(struct platform_device *pdev)
1732 {
1733         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1734         struct pxa3xx_nand_platform_data *pdata;
1735         int irq, cs;
1736
1737         if (!info)
1738                 return 0;
1739
1740         pdata = dev_get_platdata(&pdev->dev);
1741
1742         irq = platform_get_irq(pdev, 0);
1743         if (irq >= 0)
1744                 free_irq(irq, info);
1745         pxa3xx_nand_free_buff(info);
1746
1747         clk_disable_unprepare(info->clk);
1748
1749         for (cs = 0; cs < pdata->num_cs; cs++)
1750                 nand_release(info->host[cs]->mtd);
1751         return 0;
1752 }
1753
1754 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1755 {
1756         struct pxa3xx_nand_platform_data *pdata;
1757         struct device_node *np = pdev->dev.of_node;
1758         const struct of_device_id *of_id =
1759                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1760
1761         if (!of_id)
1762                 return 0;
1763
1764         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1765         if (!pdata)
1766                 return -ENOMEM;
1767
1768         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1769                 pdata->enable_arbiter = 1;
1770         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1771                 pdata->keep_config = 1;
1772         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1773         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1774
1775         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1776         if (pdata->ecc_strength < 0)
1777                 pdata->ecc_strength = 0;
1778
1779         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1780         if (pdata->ecc_step_size < 0)
1781                 pdata->ecc_step_size = 0;
1782
1783         pdev->dev.platform_data = pdata;
1784
1785         return 0;
1786 }
1787
1788 static int pxa3xx_nand_probe(struct platform_device *pdev)
1789 {
1790         struct pxa3xx_nand_platform_data *pdata;
1791         struct mtd_part_parser_data ppdata = {};
1792         struct pxa3xx_nand_info *info;
1793         int ret, cs, probe_success;
1794
1795 #ifndef ARCH_HAS_DMA
1796         if (use_dma) {
1797                 use_dma = 0;
1798                 dev_warn(&pdev->dev,
1799                          "This platform can't do DMA on this device\n");
1800         }
1801 #endif
1802         ret = pxa3xx_nand_probe_dt(pdev);
1803         if (ret)
1804                 return ret;
1805
1806         pdata = dev_get_platdata(&pdev->dev);
1807         if (!pdata) {
1808                 dev_err(&pdev->dev, "no platform data defined\n");
1809                 return -ENODEV;
1810         }
1811
1812         ret = alloc_nand_resource(pdev);
1813         if (ret) {
1814                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1815                 return ret;
1816         }
1817
1818         info = platform_get_drvdata(pdev);
1819         probe_success = 0;
1820         for (cs = 0; cs < pdata->num_cs; cs++) {
1821                 struct mtd_info *mtd = info->host[cs]->mtd;
1822
1823                 /*
1824                  * The mtd name matches the one used in 'mtdparts' kernel
1825                  * parameter. This name cannot be changed or otherwise
1826                  * user's mtd partitions configuration would get broken.
1827                  */
1828                 mtd->name = "pxa3xx_nand-0";
1829                 info->cs = cs;
1830                 ret = pxa3xx_nand_scan(mtd);
1831                 if (ret) {
1832                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1833                                 cs);
1834                         continue;
1835                 }
1836
1837                 ppdata.of_node = pdev->dev.of_node;
1838                 ret = mtd_device_parse_register(mtd, NULL,
1839                                                 &ppdata, pdata->parts[cs],
1840                                                 pdata->nr_parts[cs]);
1841                 if (!ret)
1842                         probe_success = 1;
1843         }
1844
1845         if (!probe_success) {
1846                 pxa3xx_nand_remove(pdev);
1847                 return -ENODEV;
1848         }
1849
1850         return 0;
1851 }
1852
1853 #ifdef CONFIG_PM
1854 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1855 {
1856         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1857         struct pxa3xx_nand_platform_data *pdata;
1858         struct mtd_info *mtd;
1859         int cs;
1860
1861         pdata = dev_get_platdata(&pdev->dev);
1862         if (info->state) {
1863                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1864                 return -EAGAIN;
1865         }
1866
1867         for (cs = 0; cs < pdata->num_cs; cs++) {
1868                 mtd = info->host[cs]->mtd;
1869                 mtd_suspend(mtd);
1870         }
1871
1872         return 0;
1873 }
1874
1875 static int pxa3xx_nand_resume(struct platform_device *pdev)
1876 {
1877         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1878         struct pxa3xx_nand_platform_data *pdata;
1879         struct mtd_info *mtd;
1880         int cs;
1881
1882         pdata = dev_get_platdata(&pdev->dev);
1883         /* We don't want to handle interrupt without calling mtd routine */
1884         disable_int(info, NDCR_INT_MASK);
1885
1886         /*
1887          * Directly set the chip select to a invalid value,
1888          * then the driver would reset the timing according
1889          * to current chip select at the beginning of cmdfunc
1890          */
1891         info->cs = 0xff;
1892
1893         /*
1894          * As the spec says, the NDSR would be updated to 0x1800 when
1895          * doing the nand_clk disable/enable.
1896          * To prevent it damaging state machine of the driver, clear
1897          * all status before resume
1898          */
1899         nand_writel(info, NDSR, NDSR_MASK);
1900         for (cs = 0; cs < pdata->num_cs; cs++) {
1901                 mtd = info->host[cs]->mtd;
1902                 mtd_resume(mtd);
1903         }
1904
1905         return 0;
1906 }
1907 #else
1908 #define pxa3xx_nand_suspend     NULL
1909 #define pxa3xx_nand_resume      NULL
1910 #endif
1911
/*
 * Platform driver glue. The legacy suspend/resume callbacks resolve to
 * NULL when CONFIG_PM is not set (see the #ifdef CONFIG_PM block above).
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
1922
/* Generates the module init/exit boilerplate registering the driver. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");