Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livep...
[cascardo/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
24 #include <linux/io.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
28 #include <linux/of.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
31
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
33 #define ARCH_HAS_DMA
34 #endif
35
36 #ifdef ARCH_HAS_DMA
37 #include <mach/dma.h>
38 #endif
39
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41
42 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY         msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE         (2048)
45
46 /*
47  * Define a buffer size for the initial command that detects the flash device:
48  * STATUS, READID and PARAM. The largest of these is the PARAM command,
49  * needing 256 bytes.
50  */
51 #define INIT_BUFFER_SIZE        256
52
53 /* registers and bit definitions */
54 #define NDCR            (0x00) /* Control register */
55 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
56 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
57 #define NDSR            (0x14) /* Status Register */
58 #define NDPCR           (0x18) /* Page Count Register */
59 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
60 #define NDBDR1          (0x20) /* Bad Block Register 1 */
61 #define NDECCCTRL       (0x28) /* ECC control */
62 #define NDDB            (0x40) /* Data Buffer */
63 #define NDCB0           (0x48) /* Command Buffer0 */
64 #define NDCB1           (0x4C) /* Command Buffer1 */
65 #define NDCB2           (0x50) /* Command Buffer2 */
66
67 #define NDCR_SPARE_EN           (0x1 << 31)
68 #define NDCR_ECC_EN             (0x1 << 30)
69 #define NDCR_DMA_EN             (0x1 << 29)
70 #define NDCR_ND_RUN             (0x1 << 28)
71 #define NDCR_DWIDTH_C           (0x1 << 27)
72 #define NDCR_DWIDTH_M           (0x1 << 26)
73 #define NDCR_PAGE_SZ            (0x1 << 24)
74 #define NDCR_NCSX               (0x1 << 23)
75 #define NDCR_ND_MODE            (0x3 << 21)
76 #define NDCR_NAND_MODE          (0x0)
77 #define NDCR_CLR_PG_CNT         (0x1 << 20)
78 #define NDCR_STOP_ON_UNCOR      (0x1 << 19)
79 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
80 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
81
82 #define NDCR_RA_START           (0x1 << 15)
83 #define NDCR_PG_PER_BLK         (0x1 << 14)
84 #define NDCR_ND_ARB_EN          (0x1 << 12)
85 #define NDCR_INT_MASK           (0xFFF)
86
87 #define NDSR_MASK               (0xfff)
88 #define NDSR_ERR_CNT_OFF        (16)
89 #define NDSR_ERR_CNT_MASK       (0x1f)
90 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
91 #define NDSR_RDY                (0x1 << 12)
92 #define NDSR_FLASH_RDY          (0x1 << 11)
93 #define NDSR_CS0_PAGED          (0x1 << 10)
94 #define NDSR_CS1_PAGED          (0x1 << 9)
95 #define NDSR_CS0_CMDD           (0x1 << 8)
96 #define NDSR_CS1_CMDD           (0x1 << 7)
97 #define NDSR_CS0_BBD            (0x1 << 6)
98 #define NDSR_CS1_BBD            (0x1 << 5)
99 #define NDSR_UNCORERR           (0x1 << 4)
100 #define NDSR_CORERR             (0x1 << 3)
101 #define NDSR_WRDREQ             (0x1 << 2)
102 #define NDSR_RDDREQ             (0x1 << 1)
103 #define NDSR_WRCMDREQ           (0x1)
104
105 #define NDCB0_LEN_OVRD          (0x1 << 28)
106 #define NDCB0_ST_ROW_EN         (0x1 << 26)
107 #define NDCB0_AUTO_RS           (0x1 << 25)
108 #define NDCB0_CSEL              (0x1 << 24)
109 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
110 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
111 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
112 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
113 #define NDCB0_NC                (0x1 << 20)
114 #define NDCB0_DBC               (0x1 << 19)
115 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
116 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
117 #define NDCB0_CMD2_MASK         (0xff << 8)
118 #define NDCB0_CMD1_MASK         (0xff)
119 #define NDCB0_ADDR_CYC_SHIFT    (16)
120
121 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
122 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
123 #define EXT_CMD_TYPE_READ       4 /* Read */
124 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
125 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
126 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
127 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
128
129 /* macros for registers read/write */
130 #define nand_writel(info, off, val)     \
131         writel_relaxed((val), (info)->mmio_base + (off))
132
133 #define nand_readl(info, off)           \
134         readl_relaxed((info)->mmio_base + (off))
135
136 /* error code and state */
137 enum {
138         ERR_NONE        = 0,
139         ERR_DMABUSERR   = -1,
140         ERR_SENDCMD     = -2,
141         ERR_UNCORERR    = -3,
142         ERR_BBERR       = -4,
143         ERR_CORERR      = -5,
144 };
145
146 enum {
147         STATE_IDLE = 0,
148         STATE_PREPARED,
149         STATE_CMD_HANDLE,
150         STATE_DMA_READING,
151         STATE_DMA_WRITING,
152         STATE_DMA_DONE,
153         STATE_PIO_READING,
154         STATE_PIO_WRITING,
155         STATE_CMD_DONE,
156         STATE_READY,
157 };
158
159 enum pxa3xx_nand_variant {
160         PXA3XX_NAND_VARIANT_PXA,
161         PXA3XX_NAND_VARIANT_ARMADA370,
162 };
163
/* Per chip-select state: one instance per attached NAND chip. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to struct pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this host is wired to */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;

};
179
/* Controller-wide state, shared by all chip selects. */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;	/* physical base, used for DMA to NDDB */
	struct completion	cmd_complete, dev_ready;

	/* current position within data_buff/oob_buff for the caller API */
	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;	/* points into data_buff's OOB area */
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* above */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips in the current chunk */
	unsigned int		max_bitflips;	/* max over all chunks of a page */
	int			retcode;	/* ERR_* result of last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
242
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

/*
 * Built-in timing sets (nanoseconds), referenced by builtin_flash_types.
 * Presumably in struct pxa3xx_nand_timing declaration order
 * (tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR) -- confirm against
 * include/linux/platform_data/mtd-nand-pxa3xx.h.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/*
 * Flash geometry table used for old-style (non-ONFI) detection; entry 0
 * is the catch-all default used while probing.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};

/* On-flash BBT signatures: "MVBbt0" and its byte-reversed mirror. */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
288
/* OOB layout for 2 KiB pages with 4-bit BCH: ECC in the second half. */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* OOB layout for 4 KiB pages with 4-bit BCH (two 2 KiB chunks). */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * 4 KiB pages with 8-bit BCH: 128 ECC bytes total, but eccpos only
 * lists the first 32 positions -- NOTE(review): apparently eccpos
 * cannot hold all 128 entries here; confirm how the remaining
 * positions are handled before relying on this layout for raw access.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
323
324 /* Define a default flash type setting serve as flash detecting only */
325 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
326
327 #define NDTR0_tCH(c)    (min((c), 7) << 19)
328 #define NDTR0_tCS(c)    (min((c), 7) << 16)
329 #define NDTR0_tWH(c)    (min((c), 7) << 11)
330 #define NDTR0_tWP(c)    (min((c), 7) << 8)
331 #define NDTR0_tRH(c)    (min((c), 7) << 3)
332 #define NDTR0_tRP(c)    (min((c), 7) << 0)
333
334 #define NDTR1_tR(c)     (min((c), 65535) << 16)
335 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
336 #define NDTR1_tAR(c)    (min((c), 15) << 0)
337
338 /* convert nano-seconds to nand flash controller clock cycles */
339 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
340
/* DT match table: the compatible string selects the controller variant. */
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
353
354 static enum pxa3xx_nand_variant
355 pxa3xx_nand_get_variant(struct platform_device *pdev)
356 {
357         const struct of_device_id *of_id =
358                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
359         if (!of_id)
360                 return PXA3XX_NAND_VARIANT_PXA;
361         return (enum pxa3xx_nand_variant)of_id->data;
362 }
363
/*
 * Program NDTR0CS0/NDTR1CS0 from the nanosecond timings in @t, converted
 * to controller clock cycles, and cache the computed values in @info so
 * they can be restored later without recomputing.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	/* each NDTR0_t* macro clamps and shifts into its register field */
	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
387
388 /*
389  * Set the data and OOB size, depending on the selected
390  * spare and ECC configuration.
391  * Only applicable to READ0, READOOB and PAGEPROG commands.
392  */
393 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
394                                 struct mtd_info *mtd)
395 {
396         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
397
398         info->data_size = mtd->writesize;
399         if (!oob_enable)
400                 return;
401
402         info->oob_size = info->spare_size;
403         if (!info->use_ecc)
404                 info->oob_size += info->ecc_size;
405 }
406
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* start from the cached baseline, then apply per-command options */
	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		/* NDECCCTRL bit 0 selects BCH instead of Hamming */
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
446
447 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
448 {
449         uint32_t ndcr;
450         int timeout = NAND_STOP_DELAY;
451
452         /* wait RUN bit in NDCR become 0 */
453         ndcr = nand_readl(info, NDCR);
454         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
455                 ndcr = nand_readl(info, NDCR);
456                 udelay(1);
457         }
458
459         if (timeout <= 0) {
460                 ndcr &= ~NDCR_ND_RUN;
461                 nand_writel(info, NDCR, ndcr);
462         }
463         /* clear status bits */
464         nand_writel(info, NDSR, NDSR_MASK);
465 }
466
467 static void __maybe_unused
468 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
469 {
470         uint32_t ndcr;
471
472         ndcr = nand_readl(info, NDCR);
473         nand_writel(info, NDCR, ndcr & ~int_mask);
474 }
475
476 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
477 {
478         uint32_t ndcr;
479
480         ndcr = nand_readl(info, NDCR);
481         nand_writel(info, NDCR, ndcr | int_mask);
482 }
483
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into
 * @data. With BCH enabled the hardware requires re-checking RDDREQ
 * between every 32-byte burst, so the read is chunked accordingly.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			/* 8 words == 32 bytes per burst */
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* advance by 32 bytes; len is counted in words */
			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}
516
/*
 * Transfer one chunk (plus OOB, if any) between the driver buffers and
 * the controller FIFO by PIO. Direction comes from info->state; buffer
 * positions are advanced afterwards to support multi-chunk pages.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* never move more than one chunk at a time */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* FIFO is 32 bits wide: round byte counts up to words */
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
553
#ifdef ARCH_HAS_DMA
/*
 * Kick off a single-descriptor PXA DMA transfer between data_buff and
 * the controller data FIFO (NDDB). Direction comes from info->state.
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* transfers run in 32-byte bursts: round the length up */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> FIFO */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* FIFO -> memory */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

/*
 * DMA completion callback: record bus errors, mark the transfer done
 * and re-enable controller interrupts so the command can complete.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* ack the channel status by writing it back */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
#else
/* No DMA support on this platform: transfers fall back to PIO. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
605
/*
 * Threaded half of the controller IRQ: performs the (potentially slow)
 * PIO transfer outside hard-IRQ context, then acks the data request
 * bits so the controller can proceed.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
617
/*
 * Hard-IRQ handler: decodes NDSR, records ECC results, dispatches data
 * transfers (DMA directly, PIO via the threaded handler), loads the
 * command registers on WRCMDREQ and signals completion/ready events.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* ready/command-done status bits differ per chip select */
	if (info->cs == 0) {
		ready		= NDSR_FLASH_RDY;
		cmd_done	= NDSR_CS0_CMDD;
	} else {
		ready		= NDSR_RDY;
		cmd_done	= NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports a real bitflip count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			/* mask IRQs until the DMA completion re-enables them */
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			/* PIO is done in the threaded handler */
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		/* ack WRCMDREQ before loading NDCB0, and drop it from the
		 * final NDSR write below */
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
709
/* Return 1 when every byte of @buf is 0xff (erased page), 0 otherwise. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
717
718 static void set_command_address(struct pxa3xx_nand_info *info,
719                 unsigned int page_size, uint16_t column, int page_addr)
720 {
721         /* small page addr setting */
722         if (page_size < PAGE_CHUNK_SIZE) {
723                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
724                                 | (column & 0xFF);
725
726                 info->ndcb2 = 0;
727         } else {
728                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
729                                 | (column & 0xFFFF);
730
731                 if (page_addr & 0xFF0000)
732                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
733                 else
734                         info->ndcb2 = 0;
735         }
736 }
737
/*
 * Reset the per-command bookkeeping in @info and apply the
 * command-specific defaults (ECC on for page read/program, spare off
 * for PARAM, buffer pre-filled with 0xFF for reads and SEQIN).
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need the data sizes */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
785
/*
 * Build the NDCB0..NDCB3 command-buffer words for @command and decide
 * whether the controller state machine must actually be started.
 *
 * @info:         controller state; info->ndcb0..ndcb3 are filled in here
 * @command:      NAND_CMD_* opcode from the MTD layer
 * @ext_cmd_type: extended command field used for chunked accesses on
 *                pages larger than the FIFO (PAGE_CHUNK_SIZE)
 * @column:       column (byte or word) address within the page
 * @page_addr:    page (row) address
 *
 * Returns 1 when the caller should kick off the state machine, 0 when
 * the command needs no controller transaction (SEQIN on small pages,
 * ERASE2, a blank-page PAGEPROG, or an unsupported opcode).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* A non-zero chip select is requested via the CSEL bit in NDCB0. */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; PAGEPROG starts the transfer. */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* An OOB read is a page read that starts past the data. */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the overridden transfer length. */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming entirely for an all-0xFF buffer. */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: one SEQIN+PAGEPROG double-byte command. */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		/* ONFI parameter page: fixed 256-byte length override. */
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* Double-byte ERASE1+ERASE2 with automatic status read. */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Issued together with ERASE1 above; nothing to do here. */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
960
961 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
962                          int column, int page_addr)
963 {
964         struct pxa3xx_nand_host *host = mtd->priv;
965         struct pxa3xx_nand_info *info = host->info_data;
966         int exec_cmd;
967
968         /*
969          * if this is a x16 device ,then convert the input
970          * "byte" address into a "word" address appropriate
971          * for indexing a word-oriented device
972          */
973         if (info->reg_ndcr & NDCR_DWIDTH_M)
974                 column /= 2;
975
976         /*
977          * There may be different NAND chip hooked to
978          * different chip select, so check whether
979          * chip select has been changed, if yes, reset the timing
980          */
981         if (info->cs != host->cs) {
982                 info->cs = host->cs;
983                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
984                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
985         }
986
987         prepare_start_command(info, command);
988
989         info->state = STATE_PREPARED;
990         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
991
992         if (exec_cmd) {
993                 init_completion(&info->cmd_complete);
994                 init_completion(&info->dev_ready);
995                 info->need_wait = 1;
996                 pxa3xx_nand_start(info);
997
998                 if (!wait_for_completion_timeout(&info->cmd_complete,
999                     CHIP_DELAY_TIMEOUT)) {
1000                         dev_err(&info->pdev->dev, "Wait time out!!!\n");
1001                         /* Stop State Machine for next command cycle */
1002                         pxa3xx_nand_stop(info);
1003                 }
1004         }
1005         info->state = STATE_IDLE;
1006 }
1007
/*
 * Extended cmdfunc used when the page is larger than the controller
 * FIFO (PAGE_CHUNK_SIZE): the operation is split into a sequence of
 * chunked ("naked") commands issued in a loop, with the extended
 * command type steering each step.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release any waiter immediately. */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1117
1118 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1119                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1120 {
1121         chip->write_buf(mtd, buf, mtd->writesize);
1122         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1123
1124         return 0;
1125 }
1126
1127 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1128                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1129                 int page)
1130 {
1131         struct pxa3xx_nand_host *host = mtd->priv;
1132         struct pxa3xx_nand_info *info = host->info_data;
1133
1134         chip->read_buf(mtd, buf, mtd->writesize);
1135         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1136
1137         if (info->retcode == ERR_CORERR && info->use_ecc) {
1138                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1139
1140         } else if (info->retcode == ERR_UNCORERR) {
1141                 /*
1142                  * for blank page (all 0xff), HW will calculate its ECC as
1143                  * 0, which is different from the ECC information within
1144                  * OOB, ignore such uncorrectable errors
1145                  */
1146                 if (is_buf_blank(buf, mtd->writesize))
1147                         info->retcode = ERR_NONE;
1148                 else
1149                         mtd->ecc_stats.failed++;
1150         }
1151
1152         return info->max_bitflips;
1153 }
1154
1155 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1156 {
1157         struct pxa3xx_nand_host *host = mtd->priv;
1158         struct pxa3xx_nand_info *info = host->info_data;
1159         char retval = 0xFF;
1160
1161         if (info->buf_start < info->buf_count)
1162                 /* Has just send a new command? */
1163                 retval = info->data_buff[info->buf_start++];
1164
1165         return retval;
1166 }
1167
1168 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1169 {
1170         struct pxa3xx_nand_host *host = mtd->priv;
1171         struct pxa3xx_nand_info *info = host->info_data;
1172         u16 retval = 0xFFFF;
1173
1174         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1175                 retval = *((u16 *)(info->data_buff+info->buf_start));
1176                 info->buf_start += 2;
1177         }
1178         return retval;
1179 }
1180
1181 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1182 {
1183         struct pxa3xx_nand_host *host = mtd->priv;
1184         struct pxa3xx_nand_info *info = host->info_data;
1185         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1186
1187         memcpy(buf, info->data_buff + info->buf_start, real_len);
1188         info->buf_start += real_len;
1189 }
1190
1191 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1192                 const uint8_t *buf, int len)
1193 {
1194         struct pxa3xx_nand_host *host = mtd->priv;
1195         struct pxa3xx_nand_info *info = host->info_data;
1196         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1197
1198         memcpy(info->data_buff + info->buf_start, buf, real_len);
1199         info->buf_start += real_len;
1200 }
1201
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	/* Chip select is applied per-command in the cmdfunc paths. */
}
1206
1207 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1208 {
1209         struct pxa3xx_nand_host *host = mtd->priv;
1210         struct pxa3xx_nand_info *info = host->info_data;
1211
1212         if (info->need_wait) {
1213                 info->need_wait = 0;
1214                 if (!wait_for_completion_timeout(&info->dev_ready,
1215                     CHIP_DELAY_TIMEOUT)) {
1216                         dev_err(&info->pdev->dev, "Ready time out!!!\n");
1217                         return NAND_STATUS_FAIL;
1218                 }
1219         }
1220
1221         /* pxa3xx_nand_send_command has waited for command complete */
1222         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1223                 if (info->retcode == ERR_NONE)
1224                         return 0;
1225                 else
1226                         return NAND_STATUS_FAIL;
1227         }
1228
1229         return NAND_STATUS_READY;
1230 }
1231
1232 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1233                                     const struct pxa3xx_nand_flash *f)
1234 {
1235         struct platform_device *pdev = info->pdev;
1236         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1237         struct pxa3xx_nand_host *host = info->host[info->cs];
1238         uint32_t ndcr = 0x0; /* enable all interrupts */
1239
1240         if (f->page_size != 2048 && f->page_size != 512) {
1241                 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1242                 return -EINVAL;
1243         }
1244
1245         if (f->flash_width != 16 && f->flash_width != 8) {
1246                 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1247                 return -EINVAL;
1248         }
1249
1250         /* calculate flash information */
1251         host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1252
1253         /* calculate addressing information */
1254         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1255
1256         if (f->num_blocks * f->page_per_block > 65536)
1257                 host->row_addr_cycles = 3;
1258         else
1259                 host->row_addr_cycles = 2;
1260
1261         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1262         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1263         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1264         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1265         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1266         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1267
1268         ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1269         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1270
1271         info->reg_ndcr = ndcr;
1272
1273         pxa3xx_nand_set_timing(host, f->timing);
1274         return 0;
1275 }
1276
1277 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1278 {
1279         /*
1280          * We set 0 by hard coding here, for we don't support keep_config
1281          * when there is more than one chip attached to the controller
1282          */
1283         struct pxa3xx_nand_host *host = info->host[0];
1284         uint32_t ndcr = nand_readl(info, NDCR);
1285
1286         if (ndcr & NDCR_PAGE_SZ) {
1287                 /* Controller's FIFO size */
1288                 info->chunk_size = 2048;
1289                 host->read_id_bytes = 4;
1290         } else {
1291                 info->chunk_size = 512;
1292                 host->read_id_bytes = 2;
1293         }
1294
1295         /* Set an initial chunk size */
1296         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1297         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1298         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1299         return 0;
1300 }
1301
#ifdef ARCH_HAS_DMA
/*
 * Allocate the data buffer; when the 'use_dma' module parameter is set,
 * also allocate a coherent DMA buffer with an embedded descriptor and
 * claim a DMA channel. Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/* The DMA descriptor lives at the tail of the coherent buffer. */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		/* Undo the coherent allocation before bailing out. */
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

/* Release whatever pxa3xx_nand_init_buff() allocated. */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	if (info->use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
	} else {
		kfree(info->data_buff);
	}
}
#else
/* PIO-only build: a plain kmalloc'ed buffer is all that is needed. */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
#endif
1367
1368 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1369 {
1370         struct mtd_info *mtd;
1371         struct nand_chip *chip;
1372         int ret;
1373
1374         mtd = info->host[info->cs]->mtd;
1375         chip = mtd->priv;
1376
1377         /* use the common timing to make a try */
1378         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1379         if (ret)
1380                 return ret;
1381
1382         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1383         ret = chip->waitfunc(mtd, chip);
1384         if (ret & NAND_STATUS_FAIL)
1385                 return -ENODEV;
1386
1387         return 0;
1388 }
1389
1390 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1391                         struct nand_ecc_ctrl *ecc,
1392                         int strength, int ecc_stepsize, int page_size)
1393 {
1394         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1395                 info->chunk_size = 2048;
1396                 info->spare_size = 40;
1397                 info->ecc_size = 24;
1398                 ecc->mode = NAND_ECC_HW;
1399                 ecc->size = 512;
1400                 ecc->strength = 1;
1401
1402         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1403                 info->chunk_size = 512;
1404                 info->spare_size = 8;
1405                 info->ecc_size = 8;
1406                 ecc->mode = NAND_ECC_HW;
1407                 ecc->size = 512;
1408                 ecc->strength = 1;
1409
1410         /*
1411          * Required ECC: 4-bit correction per 512 bytes
1412          * Select: 16-bit correction per 2048 bytes
1413          */
1414         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1415                 info->ecc_bch = 1;
1416                 info->chunk_size = 2048;
1417                 info->spare_size = 32;
1418                 info->ecc_size = 32;
1419                 ecc->mode = NAND_ECC_HW;
1420                 ecc->size = info->chunk_size;
1421                 ecc->layout = &ecc_layout_2KB_bch4bit;
1422                 ecc->strength = 16;
1423
1424         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1425                 info->ecc_bch = 1;
1426                 info->chunk_size = 2048;
1427                 info->spare_size = 32;
1428                 info->ecc_size = 32;
1429                 ecc->mode = NAND_ECC_HW;
1430                 ecc->size = info->chunk_size;
1431                 ecc->layout = &ecc_layout_4KB_bch4bit;
1432                 ecc->strength = 16;
1433
1434         /*
1435          * Required ECC: 8-bit correction per 512 bytes
1436          * Select: 16-bit correction per 1024 bytes
1437          */
1438         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1439                 info->ecc_bch = 1;
1440                 info->chunk_size = 1024;
1441                 info->spare_size = 0;
1442                 info->ecc_size = 32;
1443                 ecc->mode = NAND_ECC_HW;
1444                 ecc->size = info->chunk_size;
1445                 ecc->layout = &ecc_layout_4KB_bch8bit;
1446                 ecc->strength = 16;
1447         } else {
1448                 dev_err(&info->pdev->dev,
1449                         "ECC strength %d at page size %d is not supported\n",
1450                         strength, page_size);
1451                 return -ENODEV;
1452         }
1453
1454         dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1455                  ecc->strength, ecc->size);
1456         return 0;
1457 }
1458
/*
 * Full scan/identification of the chip on the current chip select:
 * sense the chip, match its READID against the platform/builtin flash
 * tables (unless keep_config adopts the bootloader setup), run the MTD
 * NAND identification, configure ECC and allocate the final data+OOB
 * buffer. Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	/* keep_config: trust the registers left by the bootloader. */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/* Search platform-provided entries first, then the builtin table. */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry ID table for nand_scan_ident() below. */
	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1609
1610 static int alloc_nand_resource(struct platform_device *pdev)
1611 {
1612         struct pxa3xx_nand_platform_data *pdata;
1613         struct pxa3xx_nand_info *info;
1614         struct pxa3xx_nand_host *host;
1615         struct nand_chip *chip = NULL;
1616         struct mtd_info *mtd;
1617         struct resource *r;
1618         int ret, irq, cs;
1619
1620         pdata = dev_get_platdata(&pdev->dev);
1621         if (pdata->num_cs <= 0)
1622                 return -ENODEV;
1623         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1624                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1625         if (!info)
1626                 return -ENOMEM;
1627
1628         info->pdev = pdev;
1629         info->variant = pxa3xx_nand_get_variant(pdev);
1630         for (cs = 0; cs < pdata->num_cs; cs++) {
1631                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1632                 chip = (struct nand_chip *)(&mtd[1]);
1633                 host = (struct pxa3xx_nand_host *)chip;
1634                 info->host[cs] = host;
1635                 host->mtd = mtd;
1636                 host->cs = cs;
1637                 host->info_data = info;
1638                 mtd->priv = host;
1639                 mtd->owner = THIS_MODULE;
1640
1641                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1642                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1643                 chip->controller        = &info->controller;
1644                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1645                 chip->select_chip       = pxa3xx_nand_select_chip;
1646                 chip->read_word         = pxa3xx_nand_read_word;
1647                 chip->read_byte         = pxa3xx_nand_read_byte;
1648                 chip->read_buf          = pxa3xx_nand_read_buf;
1649                 chip->write_buf         = pxa3xx_nand_write_buf;
1650                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1651                 chip->cmdfunc           = nand_cmdfunc;
1652         }
1653
1654         spin_lock_init(&chip->controller->lock);
1655         init_waitqueue_head(&chip->controller->wq);
1656         info->clk = devm_clk_get(&pdev->dev, NULL);
1657         if (IS_ERR(info->clk)) {
1658                 dev_err(&pdev->dev, "failed to get nand clock\n");
1659                 return PTR_ERR(info->clk);
1660         }
1661         ret = clk_prepare_enable(info->clk);
1662         if (ret < 0)
1663                 return ret;
1664
1665         if (use_dma) {
1666                 /*
1667                  * This is a dirty hack to make this driver work from
1668                  * devicetree bindings. It can be removed once we have
1669                  * a prober DMA controller framework for DT.
1670                  */
1671                 if (pdev->dev.of_node &&
1672                     of_machine_is_compatible("marvell,pxa3xx")) {
1673                         info->drcmr_dat = 97;
1674                         info->drcmr_cmd = 99;
1675                 } else {
1676                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1677                         if (r == NULL) {
1678                                 dev_err(&pdev->dev,
1679                                         "no resource defined for data DMA\n");
1680                                 ret = -ENXIO;
1681                                 goto fail_disable_clk;
1682                         }
1683                         info->drcmr_dat = r->start;
1684
1685                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1686                         if (r == NULL) {
1687                                 dev_err(&pdev->dev,
1688                                         "no resource defined for cmd DMA\n");
1689                                 ret = -ENXIO;
1690                                 goto fail_disable_clk;
1691                         }
1692                         info->drcmr_cmd = r->start;
1693                 }
1694         }
1695
1696         irq = platform_get_irq(pdev, 0);
1697         if (irq < 0) {
1698                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1699                 ret = -ENXIO;
1700                 goto fail_disable_clk;
1701         }
1702
1703         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1704         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1705         if (IS_ERR(info->mmio_base)) {
1706                 ret = PTR_ERR(info->mmio_base);
1707                 goto fail_disable_clk;
1708         }
1709         info->mmio_phys = r->start;
1710
1711         /* Allocate a buffer to allow flash detection */
1712         info->buf_size = INIT_BUFFER_SIZE;
1713         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1714         if (info->data_buff == NULL) {
1715                 ret = -ENOMEM;
1716                 goto fail_disable_clk;
1717         }
1718
1719         /* initialize all interrupts to be disabled */
1720         disable_int(info, NDSR_MASK);
1721
1722         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1723                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1724                                    pdev->name, info);
1725         if (ret < 0) {
1726                 dev_err(&pdev->dev, "failed to request IRQ\n");
1727                 goto fail_free_buf;
1728         }
1729
1730         platform_set_drvdata(pdev, info);
1731
1732         return 0;
1733
1734 fail_free_buf:
1735         free_irq(irq, info);
1736         kfree(info->data_buff);
1737 fail_disable_clk:
1738         clk_disable_unprepare(info->clk);
1739         return ret;
1740 }
1741
1742 static int pxa3xx_nand_remove(struct platform_device *pdev)
1743 {
1744         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1745         struct pxa3xx_nand_platform_data *pdata;
1746         int irq, cs;
1747
1748         if (!info)
1749                 return 0;
1750
1751         pdata = dev_get_platdata(&pdev->dev);
1752
1753         irq = platform_get_irq(pdev, 0);
1754         if (irq >= 0)
1755                 free_irq(irq, info);
1756         pxa3xx_nand_free_buff(info);
1757
1758         clk_disable_unprepare(info->clk);
1759
1760         for (cs = 0; cs < pdata->num_cs; cs++)
1761                 nand_release(info->host[cs]->mtd);
1762         return 0;
1763 }
1764
1765 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1766 {
1767         struct pxa3xx_nand_platform_data *pdata;
1768         struct device_node *np = pdev->dev.of_node;
1769         const struct of_device_id *of_id =
1770                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1771
1772         if (!of_id)
1773                 return 0;
1774
1775         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1776         if (!pdata)
1777                 return -ENOMEM;
1778
1779         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1780                 pdata->enable_arbiter = 1;
1781         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1782                 pdata->keep_config = 1;
1783         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1784         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1785
1786         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1787         if (pdata->ecc_strength < 0)
1788                 pdata->ecc_strength = 0;
1789
1790         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1791         if (pdata->ecc_step_size < 0)
1792                 pdata->ecc_step_size = 0;
1793
1794         pdev->dev.platform_data = pdata;
1795
1796         return 0;
1797 }
1798
1799 static int pxa3xx_nand_probe(struct platform_device *pdev)
1800 {
1801         struct pxa3xx_nand_platform_data *pdata;
1802         struct mtd_part_parser_data ppdata = {};
1803         struct pxa3xx_nand_info *info;
1804         int ret, cs, probe_success;
1805
1806 #ifndef ARCH_HAS_DMA
1807         if (use_dma) {
1808                 use_dma = 0;
1809                 dev_warn(&pdev->dev,
1810                          "This platform can't do DMA on this device\n");
1811         }
1812 #endif
1813         ret = pxa3xx_nand_probe_dt(pdev);
1814         if (ret)
1815                 return ret;
1816
1817         pdata = dev_get_platdata(&pdev->dev);
1818         if (!pdata) {
1819                 dev_err(&pdev->dev, "no platform data defined\n");
1820                 return -ENODEV;
1821         }
1822
1823         ret = alloc_nand_resource(pdev);
1824         if (ret) {
1825                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1826                 return ret;
1827         }
1828
1829         info = platform_get_drvdata(pdev);
1830         probe_success = 0;
1831         for (cs = 0; cs < pdata->num_cs; cs++) {
1832                 struct mtd_info *mtd = info->host[cs]->mtd;
1833
1834                 /*
1835                  * The mtd name matches the one used in 'mtdparts' kernel
1836                  * parameter. This name cannot be changed or otherwise
1837                  * user's mtd partitions configuration would get broken.
1838                  */
1839                 mtd->name = "pxa3xx_nand-0";
1840                 info->cs = cs;
1841                 ret = pxa3xx_nand_scan(mtd);
1842                 if (ret) {
1843                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1844                                 cs);
1845                         continue;
1846                 }
1847
1848                 ppdata.of_node = pdev->dev.of_node;
1849                 ret = mtd_device_parse_register(mtd, NULL,
1850                                                 &ppdata, pdata->parts[cs],
1851                                                 pdata->nr_parts[cs]);
1852                 if (!ret)
1853                         probe_success = 1;
1854         }
1855
1856         if (!probe_success) {
1857                 pxa3xx_nand_remove(pdev);
1858                 return -ENODEV;
1859         }
1860
1861         return 0;
1862 }
1863
1864 #ifdef CONFIG_PM
1865 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1866 {
1867         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1868         struct pxa3xx_nand_platform_data *pdata;
1869         struct mtd_info *mtd;
1870         int cs;
1871
1872         pdata = dev_get_platdata(&pdev->dev);
1873         if (info->state) {
1874                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1875                 return -EAGAIN;
1876         }
1877
1878         for (cs = 0; cs < pdata->num_cs; cs++) {
1879                 mtd = info->host[cs]->mtd;
1880                 mtd_suspend(mtd);
1881         }
1882
1883         return 0;
1884 }
1885
/*
 * pxa3xx_nand_resume - legacy platform PM resume hook.
 *
 * Order matters here: interrupts are masked and stale NDSR status is
 * cleared before any mtd_resume() can issue a command.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
1918 #else
1919 #define pxa3xx_nand_suspend     NULL
1920 #define pxa3xx_nand_resume      NULL
1921 #endif
1922
/*
 * Platform driver glue.  .suspend/.resume are the legacy platform PM
 * callbacks; when CONFIG_PM is disabled they are #defined to NULL above.
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");