mtd: pxa3xx_nand: rework flash detection and timing setup
[cascardo/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
35 #define ARCH_HAS_DMA
36 #endif
37
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
39
40 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY         msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE         (2048)
43
44 /*
45  * Define a buffer size for the initial command that detects the flash device:
46  * STATUS, READID and PARAM.
47  * ONFI param page is 256 bytes, and there are three redundant copies
48  * to be read. JEDEC param page is 512 bytes, and there are also three
49  * redundant copies to be read.
50  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
51  */
52 #define INIT_BUFFER_SIZE        2048
53
54 /* registers and bit definitions */
55 #define NDCR            (0x00) /* Control register */
56 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR            (0x14) /* Status Register */
59 #define NDPCR           (0x18) /* Page Count Register */
60 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1          (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL       (0x28) /* ECC control */
63 #define NDDB            (0x40) /* Data Buffer */
64 #define NDCB0           (0x48) /* Command Buffer0 */
65 #define NDCB1           (0x4C) /* Command Buffer1 */
66 #define NDCB2           (0x50) /* Command Buffer2 */
67
68 #define NDCR_SPARE_EN           (0x1 << 31)
69 #define NDCR_ECC_EN             (0x1 << 30)
70 #define NDCR_DMA_EN             (0x1 << 29)
71 #define NDCR_ND_RUN             (0x1 << 28)
72 #define NDCR_DWIDTH_C           (0x1 << 27)
73 #define NDCR_DWIDTH_M           (0x1 << 26)
74 #define NDCR_PAGE_SZ            (0x1 << 24)
75 #define NDCR_NCSX               (0x1 << 23)
76 #define NDCR_ND_MODE            (0x3 << 21)
77 #define NDCR_NAND_MODE          (0x0)
78 #define NDCR_CLR_PG_CNT         (0x1 << 20)
79 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
80 #define NFCV2_NDCR_STOP_ON_UNCOR        (0x1 << 19)
81 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
82 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84 #define NDCR_RA_START           (0x1 << 15)
85 #define NDCR_PG_PER_BLK         (0x1 << 14)
86 #define NDCR_ND_ARB_EN          (0x1 << 12)
87 #define NDCR_INT_MASK           (0xFFF)
88
89 #define NDSR_MASK               (0xfff)
90 #define NDSR_ERR_CNT_OFF        (16)
91 #define NDSR_ERR_CNT_MASK       (0x1f)
92 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY                (0x1 << 12)
94 #define NDSR_FLASH_RDY          (0x1 << 11)
95 #define NDSR_CS0_PAGED          (0x1 << 10)
96 #define NDSR_CS1_PAGED          (0x1 << 9)
97 #define NDSR_CS0_CMDD           (0x1 << 8)
98 #define NDSR_CS1_CMDD           (0x1 << 7)
99 #define NDSR_CS0_BBD            (0x1 << 6)
100 #define NDSR_CS1_BBD            (0x1 << 5)
101 #define NDSR_UNCORERR           (0x1 << 4)
102 #define NDSR_CORERR             (0x1 << 3)
103 #define NDSR_WRDREQ             (0x1 << 2)
104 #define NDSR_RDDREQ             (0x1 << 1)
105 #define NDSR_WRCMDREQ           (0x1)
106
107 #define NDCB0_LEN_OVRD          (0x1 << 28)
108 #define NDCB0_ST_ROW_EN         (0x1 << 26)
109 #define NDCB0_AUTO_RS           (0x1 << 25)
110 #define NDCB0_CSEL              (0x1 << 24)
111 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
114 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115 #define NDCB0_NC                (0x1 << 20)
116 #define NDCB0_DBC               (0x1 << 19)
117 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
118 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119 #define NDCB0_CMD2_MASK         (0xff << 8)
120 #define NDCB0_CMD1_MASK         (0xff)
121 #define NDCB0_ADDR_CYC_SHIFT    (16)
122
123 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ       4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
130
131 /*
132  * This should be large enough to read 'ONFI' and 'JEDEC'.
133  * Let's use 7 bytes, which is the maximum ID count supported
134  * by the controller (see NDCR_RD_ID_CNT_MASK).
135  */
136 #define READ_ID_BYTES           7
137
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val)     \
140         writel_relaxed((val), (info)->mmio_base + (off))
141
142 #define nand_readl(info, off)           \
143         readl_relaxed((info)->mmio_base + (off))
144
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA transfer failed */
	ERR_SENDCMD	= -2,	/* command could not be issued */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error(s) */
};

/* state machine of the command in flight (tracked in info->state) */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* controller flavour: NFCv1 (PXA SoCs) vs. NFCv2 (Armada 370/XP SoCs) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
172
/*
 * Per-chip-select state; one instance per NAND chip wired to the
 * controller (up to NUM_CHIP_SELECT, see info->host[]).
 */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to the owning pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;	/* HW ECC enabled for this chip? */
	int			cs;		/* chip-select line of this chip */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;	/* column address cycles to issue */
	unsigned int		row_addr_cycles;	/* row address cycles to issue */
};
186
/*
 * Controller-wide driver state, shared by all chip selects.
 */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	/* completed from the IRQ path: command done / device ready */
	struct completion	cmd_complete, dev_ready;

	/* current position/extent inside data_buff for the running command */
	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* of the command in flight */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips corrected in the last chunk */
	unsigned int		max_bitflips;	/* worst chunk of the page (for ecc.read_page) */
	int			retcode;	/* ERR_* outcome of the command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
251
252 static bool use_dma = 1;
253 module_param(use_dma, bool, 0444);
254 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
255
/*
 * NAND interface timings, all in nanoseconds; converted to controller
 * clock cycles via ns2cycle() before programming NDTR0/NDTR1.
 */
struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};
267
/*
 * Static description of a supported NAND chip, keyed by its READID
 * bytes; used only for the legacy (non-ONFI) timing setup path.
 */
struct pxa3xx_nand_flash {
	char		*name;
	uint32_t	chip_id;	/* low byte: manufacturer, high byte: device */
	unsigned int	page_per_block;	/* Pages per block (PG_PER_BLK) */
	unsigned int	page_size;	/* Page size in bytes (PAGE_SZ) */
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	unsigned int	num_blocks;	/* Number of physical blocks in Flash */

	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};
279
/* Timing sets (ns) referenced by builtin_flash_types[] below. */
static struct pxa3xx_nand_timing timing[] = {
	/* tCH tCS tWH  tWP tRH  tRP     tR tWHR tAR */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
286
/*
 * Known chips, matched against the 16-bit id built from the first two
 * READID bytes (low byte = manufacturer, high byte = device; see
 * pxa3xx_nand_init_timings_compat()). Entry 0 (chip_id 0) is the
 * catch-all default, see DEFAULT_FLASH_TYPE.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
298
/* On-flash bad-block-table signatures: "MVBbt0" and its mirror "1tbBVM". */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Primary BBT: stored in the last blocks of the chip, versioned. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,	/* pattern lives at OOB byte 8 */
	.len = 6,
	.veroffs = 14,	/* version byte follows the 6-byte pattern */
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: identical placement rules, mirrored signature. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
321
/* OOB layout for 2 KiB pages with BCH-4: ECC in the upper half of OOB. */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* OOB layout for 4 KiB pages with BCH-4: two ECC regions, one per 2 KiB chunk. */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/* OOB layout for 4 KiB pages with BCH-8: ECC consumes all free OOB. */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
356
357 /* Define a default flash type setting serve as flash detecting only */
358 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
359
360 #define NDTR0_tCH(c)    (min((c), 7) << 19)
361 #define NDTR0_tCS(c)    (min((c), 7) << 16)
362 #define NDTR0_tWH(c)    (min((c), 7) << 11)
363 #define NDTR0_tWP(c)    (min((c), 7) << 8)
364 #define NDTR0_tRH(c)    (min((c), 7) << 3)
365 #define NDTR0_tRP(c)    (min((c), 7) << 0)
366
367 #define NDTR1_tR(c)     (min((c), 65535) << 16)
368 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
369 #define NDTR1_tAR(c)    (min((c), 15) << 0)
370
371 /* convert nano-seconds to nand flash controller clock cycles */
372 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
373
374 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
375         {
376                 .compatible = "marvell,pxa3xx-nand",
377                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
378         },
379         {
380                 .compatible = "marvell,armada370-nand",
381                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
382         },
383         {}
384 };
385 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
386
387 static enum pxa3xx_nand_variant
388 pxa3xx_nand_get_variant(struct platform_device *pdev)
389 {
390         const struct of_device_id *of_id =
391                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
392         if (!of_id)
393                 return PXA3XX_NAND_VARIANT_PXA;
394         return (enum pxa3xx_nand_variant)of_id->data;
395 }
396
397 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
398                                    const struct pxa3xx_nand_timing *t)
399 {
400         struct pxa3xx_nand_info *info = host->info_data;
401         unsigned long nand_clk = clk_get_rate(info->clk);
402         uint32_t ndtr0, ndtr1;
403
404         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
405                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
406                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
407                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
408                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
409                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
410
411         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
412                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
413                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
414
415         info->ndtr0cs0 = ndtr0;
416         info->ndtr1cs0 = ndtr1;
417         nand_writel(info, NDTR0CS0, ndtr0);
418         nand_writel(info, NDTR1CS0, ndtr1);
419 }
420
/*
 * Program NDTR0/NDTR1 from generic SDR timings (ONFI timing modes).
 *
 * struct nand_sdr_timings values are presumably in picoseconds (hence
 * the DIV_ROUND_UP(..., 1000) conversions to nanoseconds below) —
 * TODO confirm against the nand_sdr_timings definition.
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* pulse widths derived from full cycle time minus high time */
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	/* chip_delay is in us; tR is handled in ns */
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	/* cache for later restore, then program the CS0 timing registers */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
459
460 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
461                                            unsigned int *flash_width,
462                                            unsigned int *dfc_width)
463 {
464         struct nand_chip *chip = &host->chip;
465         struct pxa3xx_nand_info *info = host->info_data;
466         const struct pxa3xx_nand_flash *f = NULL;
467         int i, id, ntypes;
468
469         ntypes = ARRAY_SIZE(builtin_flash_types);
470
471         chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
472
473         id = chip->read_byte(host->mtd);
474         id |= chip->read_byte(host->mtd) << 0x8;
475
476         for (i = 0; i < ntypes; i++) {
477                 f = &builtin_flash_types[i];
478
479                 if (f->chip_id == id)
480                         break;
481         }
482
483         if (i == ntypes) {
484                 dev_err(&info->pdev->dev, "Error: timings not found\n");
485                 return -EINVAL;
486         }
487
488         pxa3xx_nand_set_timing(host, f->timing);
489
490         *flash_width = f->flash_width;
491         *dfc_width = f->dfc_width;
492
493         return 0;
494 }
495
/*
 * ONFI timing setup: pick the highest asynchronous timing mode
 * advertised in the 'mode' bitfield and program the corresponding SDR
 * timings. Returns 0 or a PTR_ERR from the timing-mode lookup.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;
	int best = fls(mode) - 1;	/* highest set bit = fastest mode */

	if (best < 0)
		best = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(best);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}
513
514 static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
515 {
516         struct nand_chip *chip = &host->chip;
517         struct pxa3xx_nand_info *info = host->info_data;
518         unsigned int flash_width = 0, dfc_width = 0;
519         int mode, err;
520
521         mode = onfi_get_async_timing_mode(chip);
522         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
523                 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
524                                                       &dfc_width);
525                 if (err)
526                         return err;
527
528                 if (flash_width == 16) {
529                         info->reg_ndcr |= NDCR_DWIDTH_M;
530                         chip->options |= NAND_BUSWIDTH_16;
531                 }
532
533                 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
534         } else {
535                 err = pxa3xx_nand_init_timings_onfi(host, mode);
536                 if (err)
537                         return err;
538         }
539
540         return 0;
541 }
542
543 /*
544  * Set the data and OOB size, depending on the selected
545  * spare and ECC configuration.
546  * Only applicable to READ0, READOOB and PAGEPROG commands.
547  */
548 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
549                                 struct mtd_info *mtd)
550 {
551         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
552
553         info->data_size = mtd->writesize;
554         if (!oob_enable)
555                 return;
556
557         info->oob_size = info->spare_size;
558         if (!info->use_ecc)
559                 info->oob_size += info->ecc_size;
560 }
561
562 /**
563  * NOTE: it is a must to set ND_RUN firstly, then write
564  * command buffer, otherwise, it does not work.
565  * We enable all the interrupt at the same time, and
566  * let pxa3xx_nand_irq to handle all logic.
567  */
568 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
569 {
570         uint32_t ndcr;
571
572         ndcr = info->reg_ndcr;
573
574         if (info->use_ecc) {
575                 ndcr |= NDCR_ECC_EN;
576                 if (info->ecc_bch)
577                         nand_writel(info, NDECCCTRL, 0x1);
578         } else {
579                 ndcr &= ~NDCR_ECC_EN;
580                 if (info->ecc_bch)
581                         nand_writel(info, NDECCCTRL, 0x0);
582         }
583
584         if (info->use_dma)
585                 ndcr |= NDCR_DMA_EN;
586         else
587                 ndcr &= ~NDCR_DMA_EN;
588
589         if (info->use_spare)
590                 ndcr |= NDCR_SPARE_EN;
591         else
592                 ndcr &= ~NDCR_SPARE_EN;
593
594         ndcr |= NDCR_ND_RUN;
595
596         /* clear status bits and run */
597         nand_writel(info, NDSR, NDSR_MASK);
598         nand_writel(info, NDCR, 0);
599         nand_writel(info, NDCR, ndcr);
600 }
601
/*
 * Stop the controller: wait for NDCR.ND_RUN to drop, force it clear on
 * timeout, cancel any in-flight DMA and acknowledge all status bits.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	/*
	 * NOTE(review): NAND_STOP_DELAY is msecs_to_jiffies(40), but it
	 * is used below as an iteration count with udelay(1), i.e. as
	 * microseconds — the effective wait is HZ-dependent and much
	 * shorter than 40 ms. Confirm the intended wait time.
	 */
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* still running after the timeout: clear ND_RUN by hand */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
624
625 static void __maybe_unused
626 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
627 {
628         uint32_t ndcr;
629
630         ndcr = nand_readl(info, NDCR);
631         nand_writel(info, NDCR, ndcr & ~int_mask);
632 }
633
634 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
635 {
636         uint32_t ndcr;
637
638         ndcr = nand_readl(info, NDCR);
639         nand_writel(info, NDCR, ndcr | int_mask);
640 }
641
/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into
 * 'data'. With BCH enabled the FIFO must be drained in bursts, with a
 * RDDREQ poll between them; without BCH a single burst read suffices.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* 8 words = 32 bytes (void * arithmetic, GCC extension) */
			data += 32;
			len -= 8;
		}
	}

	/* final (or only) burst, no poll needed afterwards */
	ioread32_rep(info->mmio_base + NDDB, data, len);
}
674
/*
 * PIO transfer of one chunk between data_buff/oob_buff and the FIFO.
 * Direction is taken from info->state (STATE_PIO_READING/WRITING);
 * any other state is a driver bug. Called from the threaded IRQ
 * handler on each WRDREQ/RDDREQ.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* never move more than one controller chunk per data request */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* FIFO is word-wide, hence the round-up to 4 bytes */
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
711
/*
 * DMA completion callback: record the transfer outcome, unmap the
 * scatterlist, then acknowledge the data-request bits and re-enable
 * controller interrupts so the command can complete.
 */
static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	/* ack the requests that triggered the DMA; unmask all IRQs again */
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}
730
/*
 * Submit a DMA transfer for the current command. Direction is derived
 * from info->state (STATE_DMA_READING/WRITING — anything else is a
 * driver bug); completion is reported via pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
	/* also cover spare+ECC bytes when OOB is part of the transfer */
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
767
768 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
769 {
770         struct pxa3xx_nand_info *info = data;
771
772         handle_data_pio(info);
773
774         info->state = STATE_CMD_DONE;
775         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
776
777         return IRQ_HANDLED;
778 }
779
/*
 * Hard NAND interrupt handler.
 *
 * Decodes NDSR: records ECC results, kicks off DMA or wakes the PIO
 * thread for data requests, loads the next command into NDCB0 when the
 * controller asks for it (WRCMDREQ), and completes the cmd_complete /
 * dev_ready completions waited on by the cmdfunc/waitfunc paths.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
        struct pxa3xx_nand_info *info = devid;
        unsigned int status, is_completed = 0, is_ready = 0;
        unsigned int ready, cmd_done;
        irqreturn_t ret = IRQ_HANDLED;

        /* The ready and command-done status bits are per chip select */
        if (info->cs == 0) {
                ready           = NDSR_FLASH_RDY;
                cmd_done        = NDSR_CS0_CMDD;
        } else {
                ready           = NDSR_RDY;
                cmd_done        = NDSR_CS1_CMDD;
        }

        status = nand_readl(info, NDSR);

        if (status & NDSR_UNCORERR)
                info->retcode = ERR_UNCORERR;
        if (status & NDSR_CORERR) {
                info->retcode = ERR_CORERR;
                /* Only the Armada 370/XP BCH engine reports a bitflip count */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
                    info->ecc_bch)
                        info->ecc_err_cnt = NDSR_ERR_CNT(status);
                else
                        info->ecc_err_cnt = 1;

                /*
                 * Each chunk composing a page is corrected independently,
                 * and we need to store maximum number of corrected bitflips
                 * to return it to the MTD layer in ecc.read_page().
                 */
                info->max_bitflips = max_t(unsigned int,
                                           info->max_bitflips,
                                           info->ecc_err_cnt);
        }
        if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
                /* whether use dma to transfer data */
                if (info->use_dma) {
                        disable_int(info, NDCR_INT_MASK);
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_DMA_READING : STATE_DMA_WRITING;
                        start_data_dma(info);
                        goto NORMAL_IRQ_EXIT;
                } else {
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_PIO_READING : STATE_PIO_WRITING;
                        /* PIO transfer is done in the threaded handler */
                        ret = IRQ_WAKE_THREAD;
                        goto NORMAL_IRQ_EXIT;
                }
        }
        if (status & cmd_done) {
                info->state = STATE_CMD_DONE;
                is_completed = 1;
        }
        if (status & ready) {
                info->state = STATE_READY;
                is_ready = 1;
        }

        /*
         * Clear all status bit before issuing the next command, which
         * can and will alter the status bits and will deserve a new
         * interrupt on its own. This lets the controller exit the IRQ
         */
        nand_writel(info, NDSR, status);

        if (status & NDSR_WRCMDREQ) {
                status &= ~NDSR_WRCMDREQ;
                info->state = STATE_CMD_HANDLE;

                /*
                 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
                 * must be loaded by writing directly either 12 or 16
                 * bytes directly to NDCB0, four bytes at a time.
                 *
                 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
                 * but each NDCBx register can be read.
                 */
                nand_writel(info, NDCB0, info->ndcb0);
                nand_writel(info, NDCB0, info->ndcb1);
                nand_writel(info, NDCB0, info->ndcb2);

                /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                        nand_writel(info, NDCB0, info->ndcb3);
        }

        /* Wake up anyone waiting in nand_cmdfunc*()/waitfunc() */
        if (is_completed)
                complete(&info->cmd_complete);
        if (is_ready)
                complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
        return ret;
}
875
/* Return 1 if the whole buffer is erased-flash content (all 0xFF), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                if (buf[i] != 0xff)
                        return 0;
        }

        return 1;
}
883
884 static void set_command_address(struct pxa3xx_nand_info *info,
885                 unsigned int page_size, uint16_t column, int page_addr)
886 {
887         /* small page addr setting */
888         if (page_size < PAGE_CHUNK_SIZE) {
889                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
890                                 | (column & 0xFF);
891
892                 info->ndcb2 = 0;
893         } else {
894                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
895                                 | (column & 0xFFFF);
896
897                 if (page_addr & 0xFF0000)
898                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
899                 else
900                         info->ndcb2 = 0;
901         }
902 }
903
/*
 * Reset the per-command driver state and apply the command-specific
 * defaults (ECC on/off, spare area use, data sizes) before the command
 * is actually built by prepare_set_command().
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
        struct pxa3xx_nand_host *host = info->host[info->cs];
        struct mtd_info *mtd = host->mtd;

        /* reset data and oob column point to handle data */
        info->buf_start         = 0;
        info->buf_count         = 0;
        info->oob_size          = 0;
        info->data_buff_pos     = 0;
        info->oob_buff_pos      = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
        info->retcode           = ERR_NONE;
        info->ecc_err_cnt       = 0;
        info->ndcb3             = 0;
        info->need_wait         = 0;

        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_PAGEPROG:
                info->use_ecc = 1;
                /* fall through: READ0/PAGEPROG also need the data sizes */
        case NAND_CMD_READOOB:
                pxa3xx_set_datasize(info, mtd);
                break;
        case NAND_CMD_PARAM:
                /* ONFI/JEDEC param page is read raw, without the spare area */
                info->use_spare = 0;
                break;
        default:
                info->ndcb1 = 0;
                info->ndcb2 = 0;
                break;
        }

        /*
         * If we are about to issue a read command, or about to set
         * the write address, then clean the data buffer.
         */
        if (command == NAND_CMD_READ0 ||
            command == NAND_CMD_READOOB ||
            command == NAND_CMD_SEQIN) {

                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xFF, info->buf_count);
        }

}
951
/*
 * Build the NDCB0..NDCB3 command-buffer words for @command.
 *
 * @ext_cmd_type selects the extended (naked/dispatch/last) sub-command
 * used by the chunked large-page sequences on NFCv2 controllers.
 * Returns 1 if the command must actually be issued to the controller,
 * 0 if nothing is to be executed (e.g. SEQIN on small pages, blank
 * page program, ERASE2).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
                int ext_cmd_type, uint16_t column, int page_addr)
{
        int addr_cycle, exec_cmd;
        struct pxa3xx_nand_host *host;
        struct mtd_info *mtd;

        host = info->host[info->cs];
        mtd = host->mtd;
        addr_cycle = 0;
        exec_cmd = 1;

        /* Route the command to the right chip select */
        if (info->cs != 0)
                info->ndcb0 = NDCB0_CSEL;
        else
                info->ndcb0 = 0;

        /* SEQIN only latches the address; PAGEPROG triggers the write */
        if (command == NAND_CMD_SEQIN)
                exec_cmd = 0;

        addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
                                    + host->col_addr_cycles);

        switch (command) {
        case NAND_CMD_READOOB:
        case NAND_CMD_READ0:
                info->buf_start = column;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | addr_cycle
                                | NAND_CMD_READ0;

                /* OOB read is a full-page read with the start offset moved */
                if (command == NAND_CMD_READOOB)
                        info->buf_start += mtd->writesize;

                /*
                 * Multiple page read needs an 'extended command type' field,
                 * which is either naked-read or last-read according to the
                 * state.
                 */
                if (mtd->writesize == PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
                } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        /* Length override: one chunk plus its spare bytes */
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;
                }

                set_command_address(info, mtd->writesize, column, page_addr);
                break;

        case NAND_CMD_SEQIN:

                info->buf_start = column;
                set_command_address(info, mtd->writesize, 0, page_addr);

                /*
                 * Multiple page programming needs to execute the initial
                 * SEQIN command that sets the page address.
                 */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                | addr_cycle
                                | command;
                        /* No data transfer in this case */
                        info->data_size = 0;
                        exec_cmd = 1;
                }
                break;

        case NAND_CMD_PAGEPROG:
                /* Skip programming pages that are still fully erased */
                if (is_buf_blank(info->data_buff,
                                        (mtd->writesize + mtd->oobsize))) {
                        exec_cmd = 0;
                        break;
                }

                /* Second command setting for large pages */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        /*
                         * Multiple page write uses the 'extended command'
                         * field. This can be used to issue a command dispatch
                         * or a naked-write depending on the current stage.
                         */
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;

                        /*
                         * This is the command dispatch that completes a chunked
                         * page program operation.
                         */
                        if (info->data_size == 0) {
                                info->ndcb0 = NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                        | command;
                                info->ndcb1 = 0;
                                info->ndcb2 = 0;
                                info->ndcb3 = 0;
                        }
                } else {
                        /* Small page: a single SEQIN+PAGEPROG double command */
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_AUTO_RS
                                        | NDCB0_ST_ROW_EN
                                        | NDCB0_DBC
                                        | (NAND_CMD_PAGEPROG << 8)
                                        | NAND_CMD_SEQIN
                                        | addr_cycle;
                }
                break;

        case NAND_CMD_PARAM:
                /* Read the full (redundant-copy) parameter page area */
                info->buf_count = INIT_BUFFER_SIZE;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | NDCB0_ADDR_CYC(1)
                                | NDCB0_LEN_OVRD
                                | command;
                info->ndcb1 = (column & 0xFF);
                info->ndcb3 = INIT_BUFFER_SIZE;
                info->data_size = INIT_BUFFER_SIZE;
                break;

        case NAND_CMD_READID:
                info->buf_count = READ_ID_BYTES;
                info->ndcb0 |= NDCB0_CMD_TYPE(3)
                                | NDCB0_ADDR_CYC(1)
                                | command;
                info->ndcb1 = (column & 0xFF);

                /* Controller transfers a fixed 8 bytes for READID */
                info->data_size = 8;
                break;
        case NAND_CMD_STATUS:
                info->buf_count = 1;
                info->ndcb0 |= NDCB0_CMD_TYPE(4)
                                | NDCB0_ADDR_CYC(1)
                                | command;

                /* Controller transfers a fixed 8 bytes for STATUS too */
                info->data_size = 8;
                break;

        case NAND_CMD_ERASE1:
                /* ERASE1+ERASE2 issued as one double-byte command */
                info->ndcb0 |= NDCB0_CMD_TYPE(2)
                                | NDCB0_AUTO_RS
                                | NDCB0_ADDR_CYC(3)
                                | NDCB0_DBC
                                | (NAND_CMD_ERASE2 << 8)
                                | NAND_CMD_ERASE1;
                info->ndcb1 = page_addr;
                info->ndcb2 = 0;

                break;
        case NAND_CMD_RESET:
                info->ndcb0 |= NDCB0_CMD_TYPE(5)
                                | command;

                break;

        case NAND_CMD_ERASE2:
                /* Already issued together with ERASE1 above */
                exec_cmd = 0;
                break;

        default:
                exec_cmd = 0;
                dev_err(&info->pdev->dev, "non-supported command %x\n",
                                command);
                break;
        }

        return exec_cmd;
}
1126
/*
 * nand_chip->cmdfunc implementation for devices whose page fits in the
 * controller FIFO (writesize <= PAGE_CHUNK_SIZE): build the command,
 * issue it, and wait for command completion.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                         int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd;

        /*
         * if this is a x16 device ,then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chip hooked to
         * different chip select, so check whether
         * chip select has been changed, if yes, reset the timing
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        prepare_start_command(info, command);

        info->state = STATE_PREPARED;
        exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

        if (exec_cmd) {
                /* The IRQ handler completes these; waitfunc() uses dev_ready */
                init_completion(&info->cmd_complete);
                init_completion(&info->dev_ready);
                info->need_wait = 1;
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait time out!!!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                }
        }
        info->state = STATE_IDLE;
}
1173
/*
 * nand_chip->cmdfunc implementation for large pages (writesize bigger
 * than the controller FIFO). A single MTD command is translated into a
 * sequence of chunked controller commands (naked read/write, dispatch,
 * last read/write) selected via the extended-command-type field, looping
 * until the whole page has been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd, ext_cmd_type;

        /*
         * if this is a x16 device then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chip hooked to
         * different chip select, so check whether
         * chip select has been changed, if yes, reset the timing
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        /* Select the extended command for the first command */
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_READOOB:
                ext_cmd_type = EXT_CMD_TYPE_MONO;
                break;
        case NAND_CMD_SEQIN:
                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                break;
        case NAND_CMD_PAGEPROG:
                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
                break;
        default:
                ext_cmd_type = 0;
                break;
        }

        prepare_start_command(info, command);

        /*
         * Prepare the "is ready" completion before starting a command
         * transaction sequence. If the command is not executed the
         * completion will be completed, see below.
         *
         * We can do that inside the loop because the command variable
         * is invariant and thus so is the exec_cmd.
         */
        info->need_wait = 1;
        init_completion(&info->dev_ready);
        do {
                info->state = STATE_PREPARED;
                exec_cmd = prepare_set_command(info, command, ext_cmd_type,
                                               column, page_addr);
                if (!exec_cmd) {
                        /* Nothing to run: release any waitfunc() waiter */
                        info->need_wait = 0;
                        complete(&info->dev_ready);
                        break;
                }

                init_completion(&info->cmd_complete);
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait time out!!!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                        break;
                }

                /* Check if the sequence is complete */
                if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
                        break;

                /*
                 * After a splitted program command sequence has issued
                 * the command dispatch, the command sequence is complete.
                 */
                if (info->data_size == 0 &&
                    command == NAND_CMD_PAGEPROG &&
                    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
                        break;

                if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
                        /* Last read: issue a 'last naked read' */
                        if (info->data_size == info->chunk_size)
                                ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
                        else
                                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

                /*
                 * If a splitted program command has no more data to transfer,
                 * the command dispatch must be issued to complete.
                 */
                } else if (command == NAND_CMD_PAGEPROG &&
                           info->data_size == 0) {
                                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                }
        } while (1);

        info->state = STATE_IDLE;
}
1283
/*
 * ecc.write_page callback: stage the page data and OOB into the driver
 * buffer; the actual programming (with hardware ECC) happens when the
 * core issues NAND_CMD_PAGEPROG. Always returns 0.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, const uint8_t *buf, int oob_required,
                int page)
{
        chip->write_buf(mtd, buf, mtd->writesize);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

        return 0;
}
1293
/*
 * ecc.read_page callback: copy the already-transferred page data and OOB
 * out of the driver buffer and fold the ECC outcome recorded by the IRQ
 * handler into the MTD ECC statistics.
 *
 * Returns the maximum number of bitflips corrected in any chunk of the
 * page, as required by the MTD API.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, uint8_t *buf, int oob_required,
                int page)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;

        chip->read_buf(mtd, buf, mtd->writesize);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

        if (info->retcode == ERR_CORERR && info->use_ecc) {
                mtd->ecc_stats.corrected += info->ecc_err_cnt;

        } else if (info->retcode == ERR_UNCORERR) {
                /*
                 * for blank page (all 0xff), HW will calculate its ECC as
                 * 0, which is different from the ECC information within
                 * OOB, ignore such uncorrectable errors
                 */
                if (is_buf_blank(buf, mtd->writesize))
                        info->retcode = ERR_NONE;
                else
                        mtd->ecc_stats.failed++;
        }

        return info->max_bitflips;
}
1321
1322 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1323 {
1324         struct pxa3xx_nand_host *host = mtd->priv;
1325         struct pxa3xx_nand_info *info = host->info_data;
1326         char retval = 0xFF;
1327
1328         if (info->buf_start < info->buf_count)
1329                 /* Has just send a new command? */
1330                 retval = info->data_buff[info->buf_start++];
1331
1332         return retval;
1333 }
1334
/*
 * nand_chip->read_word callback (x16 devices): return the next 16-bit
 * word from the driver data buffer, or 0xFFFF if the position is odd or
 * the buffer is exhausted.
 *
 * NOTE(review): the bounds check allows buf_start == buf_count - 1, in
 * which case the u16 load reads one byte past buf_count; presumably
 * buf_count is always even for x16 devices — verify against callers.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        u16 retval = 0xFFFF;

        /* Only serve aligned, in-bounds positions */
        if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
                retval = *((u16 *)(info->data_buff+info->buf_start));
                info->buf_start += 2;
        }
        return retval;
}
1347
/*
 * nand_chip->read_buf callback: copy up to @len bytes out of the driver
 * data buffer, clamped to the bytes remaining, and advance the position.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        /* Never copy past the end of the staged data */
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(buf, info->data_buff + info->buf_start, real_len);
        info->buf_start += real_len;
}
1357
/*
 * nand_chip->write_buf callback: copy up to @len bytes into the driver
 * data buffer, clamped to the space remaining, and advance the position.
 */
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        /* Never write past the end of the staging buffer */
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(info->data_buff + info->buf_start, buf, real_len);
        info->buf_start += real_len;
}
1368
/*
 * nand_chip->select_chip callback: intentionally a no-op — chip
 * selection is encoded per command via info->cs in the cmdfunc paths.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1373
/*
 * nand_chip->waitfunc callback: wait for the device-ready completion
 * armed by the cmdfunc paths (if any), then report the outcome of the
 * last write/erase command.
 *
 * Returns NAND_STATUS_READY, 0 on a successful write/erase, or
 * NAND_STATUS_FAIL on error or ready timeout.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;

        if (info->need_wait) {
                info->need_wait = 0;
                if (!wait_for_completion_timeout(&info->dev_ready,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Ready time out!!!\n");
                        return NAND_STATUS_FAIL;
                }
        }

        /* pxa3xx_nand_send_command has waited for command complete */
        if (this->state == FL_WRITING || this->state == FL_ERASING) {
                if (info->retcode == ERR_NONE)
                        return 0;
                else
                        return NAND_STATUS_FAIL;
        }

        return NAND_STATUS_READY;
}
1398
/*
 * Build the cached NDCR value for the detected flash geometry: arbiter
 * enable, READ ID byte count, spare area, address cycle layout, pages
 * per block, and page size. The register itself is written later, when
 * a command is started. Always returns 0.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct pxa3xx_nand_host *host = info->host[info->cs];
        struct mtd_info *mtd = host->mtd;
        struct nand_chip *chip = mtd->priv;

        /* configure default flash values */
        info->reg_ndcr = 0x0; /* enable all interrupts */
        info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
        info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
        info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
        info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
        /* page_shift == 6 means 64 pages per block */
        info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
        info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;

        return 0;
}
1418
/*
 * keep_config path: adopt the controller configuration left by the
 * bootloader instead of reprogramming it — snapshot NDCR (minus the
 * interrupt-mask and arbiter bits) and the CS0 timing registers.
 * Always returns 0.
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr = nand_readl(info, NDCR);

        /* Set an initial chunk size */
        info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
        info->reg_ndcr = ndcr &
                ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
        info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
        info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
        return 0;
}
1431
1432 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1433 {
1434         struct platform_device *pdev = info->pdev;
1435         struct dma_slave_config config;
1436         dma_cap_mask_t mask;
1437         struct pxad_param param;
1438         int ret;
1439
1440         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1441         if (info->data_buff == NULL)
1442                 return -ENOMEM;
1443         if (use_dma == 0)
1444                 return 0;
1445
1446         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1447         if (ret)
1448                 return ret;
1449
1450         sg_init_one(&info->sg, info->data_buff, info->buf_size);
1451         dma_cap_zero(mask);
1452         dma_cap_set(DMA_SLAVE, mask);
1453         param.prio = PXAD_PRIO_LOWEST;
1454         param.drcmr = info->drcmr_dat;
1455         info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1456                                                           &param, &pdev->dev,
1457                                                           "data");
1458         if (!info->dma_chan) {
1459                 dev_err(&pdev->dev, "unable to request data dma channel\n");
1460                 return -ENODEV;
1461         }
1462
1463         memset(&config, 0, sizeof(config));
1464         config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1465         config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1466         config.src_addr = info->mmio_phys + NDDB;
1467         config.dst_addr = info->mmio_phys + NDDB;
1468         config.src_maxburst = 32;
1469         config.dst_maxburst = 32;
1470         ret = dmaengine_slave_config(info->dma_chan, &config);
1471         if (ret < 0) {
1472                 dev_err(&info->pdev->dev,
1473                         "dma channel configuration failed: %d\n",
1474                         ret);
1475                 return ret;
1476         }
1477
1478         /*
1479          * Now that DMA buffers are allocated we turn on
1480          * DMA proper for I/O operations.
1481          */
1482         info->use_dma = 1;
1483         return 0;
1484 }
1485
/*
 * Undo pxa3xx_nand_init_buff(): stop and release the DMA channel (only
 * acquired when use_dma was set) and free the data buffer.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
        if (info->use_dma) {
                dmaengine_terminate_all(info->dma_chan);
                dma_release_channel(info->dma_chan);
        }
        kfree(info->data_buff);
}
1494
/*
 * Probe for the presence of a chip on the current chip select: program
 * conservative (ONFI async mode 0) timings and issue a RESET; if the
 * chip does not answer, report -ENODEV.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
        struct pxa3xx_nand_info *info = host->info_data;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        const struct nand_sdr_timings *timings;
        int ret;

        mtd = info->host[info->cs]->mtd;
        chip = mtd->priv;

        /* use the common timing to make a try */
        timings = onfi_async_timing_mode_to_sdr_timings(0);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        pxa3xx_nand_set_sdr_timing(host, timings);

        chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
        ret = chip->waitfunc(mtd, chip);
        if (ret & NAND_STATUS_FAIL)
                return -ENODEV;

        return 0;
}
1520
/*
 * Map the (required ECC strength, step size, page size) triple onto one
 * of the controller's supported ECC geometries, filling in the chunk,
 * spare and ECC sizes plus the MTD ecc control structure.
 *
 * Note the controller may select a stronger scheme than requested (e.g.
 * 16-bit/2048B for a 4-bit/512B requirement). Returns 0 on success or
 * -ENODEV for unsupported combinations.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
                        struct nand_ecc_ctrl *ecc,
                        int strength, int ecc_stepsize, int page_size)
{
        /* Hamming, 1-bit/512B on a 2KB page */
        if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
                info->chunk_size = 2048;
                info->spare_size = 40;
                info->ecc_size = 24;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        /* Hamming, 1-bit/512B on a small (512B) page */
        } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
                info->chunk_size = 512;
                info->spare_size = 8;
                info->ecc_size = 8;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        /*
         * Required ECC: 4-bit correction per 512 bytes
         * Select: 16-bit correction per 2048 bytes
         */
        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_2KB_bch4bit;
                ecc->strength = 16;

        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch4bit;
                ecc->strength = 16;

        /*
         * Required ECC: 8-bit correction per 512 bytes
         * Select: 16-bit correction per 1024 bytes
         */
        } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 1024;
                info->spare_size = 0;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch8bit;
                ecc->strength = 16;
        } else {
                dev_err(&info->pdev->dev,
                        "ECC strength %d at page size %d is not supported\n",
                        strength, page_size);
                return -ENODEV;
        }

        dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
                 ecc->strength, ecc->size);
        return 0;
}
1589
/*
 * pxa3xx_nand_scan - detect the NAND chip behind @mtd and configure the
 * controller, ECC engine and data buffers for it.
 *
 * Unless platform data asks to keep the bootloader configuration (and that
 * configuration can be read back successfully), the controller is programmed
 * from scratch and a dummy command sequence checks that a chip actually
 * responds on the current chip select.
 *
 * Returns 0 on success or a negative errno (e.g. -ENODEV when no chip is
 * found or the requested ECC layout is unsupported).
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_chip *chip = mtd->priv;
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Reuse the bootloader's controller setup when possible */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

	/* Issue a probe command sequence to verify a chip is present */
	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	/* Let the NAND core identify the chip (READID/ONFI/JEDEC) */
	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform data overrides the chip's advertised ECC requirements */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	/* NOTE(review): uses && — a chip advertising only one of the two
	 * values falls through with the other still 0; confirm intended. */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* More than 65536 pages needs a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1706
1707 static int alloc_nand_resource(struct platform_device *pdev)
1708 {
1709         struct pxa3xx_nand_platform_data *pdata;
1710         struct pxa3xx_nand_info *info;
1711         struct pxa3xx_nand_host *host;
1712         struct nand_chip *chip = NULL;
1713         struct mtd_info *mtd;
1714         struct resource *r;
1715         int ret, irq, cs;
1716
1717         pdata = dev_get_platdata(&pdev->dev);
1718         if (pdata->num_cs <= 0)
1719                 return -ENODEV;
1720         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1721                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1722         if (!info)
1723                 return -ENOMEM;
1724
1725         info->pdev = pdev;
1726         info->variant = pxa3xx_nand_get_variant(pdev);
1727         for (cs = 0; cs < pdata->num_cs; cs++) {
1728                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1729                 chip = (struct nand_chip *)(&mtd[1]);
1730                 host = (struct pxa3xx_nand_host *)chip;
1731                 info->host[cs] = host;
1732                 host->mtd = mtd;
1733                 host->cs = cs;
1734                 host->info_data = info;
1735                 mtd->priv = host;
1736                 mtd->dev.parent = &pdev->dev;
1737
1738                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1739                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1740                 chip->controller        = &info->controller;
1741                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1742                 chip->select_chip       = pxa3xx_nand_select_chip;
1743                 chip->read_word         = pxa3xx_nand_read_word;
1744                 chip->read_byte         = pxa3xx_nand_read_byte;
1745                 chip->read_buf          = pxa3xx_nand_read_buf;
1746                 chip->write_buf         = pxa3xx_nand_write_buf;
1747                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1748                 chip->cmdfunc           = nand_cmdfunc;
1749         }
1750
1751         spin_lock_init(&chip->controller->lock);
1752         init_waitqueue_head(&chip->controller->wq);
1753         info->clk = devm_clk_get(&pdev->dev, NULL);
1754         if (IS_ERR(info->clk)) {
1755                 dev_err(&pdev->dev, "failed to get nand clock\n");
1756                 return PTR_ERR(info->clk);
1757         }
1758         ret = clk_prepare_enable(info->clk);
1759         if (ret < 0)
1760                 return ret;
1761
1762         if (use_dma) {
1763                 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1764                 if (r == NULL) {
1765                         dev_err(&pdev->dev,
1766                                 "no resource defined for data DMA\n");
1767                         ret = -ENXIO;
1768                         goto fail_disable_clk;
1769                 }
1770                 info->drcmr_dat = r->start;
1771
1772                 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1773                 if (r == NULL) {
1774                         dev_err(&pdev->dev,
1775                                 "no resource defined for cmd DMA\n");
1776                         ret = -ENXIO;
1777                         goto fail_disable_clk;
1778                 }
1779                 info->drcmr_cmd = r->start;
1780         }
1781
1782         irq = platform_get_irq(pdev, 0);
1783         if (irq < 0) {
1784                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1785                 ret = -ENXIO;
1786                 goto fail_disable_clk;
1787         }
1788
1789         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1790         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1791         if (IS_ERR(info->mmio_base)) {
1792                 ret = PTR_ERR(info->mmio_base);
1793                 goto fail_disable_clk;
1794         }
1795         info->mmio_phys = r->start;
1796
1797         /* Allocate a buffer to allow flash detection */
1798         info->buf_size = INIT_BUFFER_SIZE;
1799         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1800         if (info->data_buff == NULL) {
1801                 ret = -ENOMEM;
1802                 goto fail_disable_clk;
1803         }
1804
1805         /* initialize all interrupts to be disabled */
1806         disable_int(info, NDSR_MASK);
1807
1808         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1809                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1810                                    pdev->name, info);
1811         if (ret < 0) {
1812                 dev_err(&pdev->dev, "failed to request IRQ\n");
1813                 goto fail_free_buf;
1814         }
1815
1816         platform_set_drvdata(pdev, info);
1817
1818         return 0;
1819
1820 fail_free_buf:
1821         free_irq(irq, info);
1822         kfree(info->data_buff);
1823 fail_disable_clk:
1824         clk_disable_unprepare(info->clk);
1825         return ret;
1826 }
1827
/*
 * pxa3xx_nand_remove - driver removal: release the IRQ and buffers, hand
 * the DFI bus back to the SMC, stop the clock and unregister every MTD.
 *
 * Always returns 0; tolerates being called when probe never completed
 * (drvdata still NULL).
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	/* Nothing was set up if drvdata is missing */
	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	/* Unregister the MTD device for each chip select */
	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
1860
1861 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1862 {
1863         struct pxa3xx_nand_platform_data *pdata;
1864         struct device_node *np = pdev->dev.of_node;
1865         const struct of_device_id *of_id =
1866                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1867
1868         if (!of_id)
1869                 return 0;
1870
1871         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1872         if (!pdata)
1873                 return -ENOMEM;
1874
1875         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1876                 pdata->enable_arbiter = 1;
1877         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1878                 pdata->keep_config = 1;
1879         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1880         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1881
1882         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1883         if (pdata->ecc_strength < 0)
1884                 pdata->ecc_strength = 0;
1885
1886         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1887         if (pdata->ecc_step_size < 0)
1888                 pdata->ecc_step_size = 0;
1889
1890         pdev->dev.platform_data = pdata;
1891
1892         return 0;
1893 }
1894
/*
 * pxa3xx_nand_probe - driver entry point: resolve platform data (from DT
 * or board files), allocate controller resources, then scan and register
 * an MTD device for every configured chip select.
 *
 * Probe succeeds if at least one chip select yields a registered MTD;
 * returns -ENODEV (after full teardown) when none does.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	/* DMA is only wired up on ARM PXA/MMP platforms */
	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			/* A failing chip select is skipped, not fatal */
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				cs);
			continue;
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	/* No usable chip on any chip select: undo everything */
	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
1960
1961 #ifdef CONFIG_PM
1962 static int pxa3xx_nand_suspend(struct device *dev)
1963 {
1964         struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
1965
1966         if (info->state) {
1967                 dev_err(dev, "driver busy, state = %d\n", info->state);
1968                 return -EAGAIN;
1969         }
1970
1971         return 0;
1972 }
1973
/*
 * pxa3xx_nand_resume - bring the controller back from suspend: mask
 * interrupts, force a timing re-load and clear stale status bits.
 * Always returns 0.
 */
static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to a invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
1998 #else
1999 #define pxa3xx_nand_suspend     NULL
2000 #define pxa3xx_nand_resume      NULL
2001 #endif
2002
/* Legacy PM callbacks; compiled out (NULL) when CONFIG_PM is disabled */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

/* Platform driver glue: matched by name or by the DT compatible table */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");