/*
 * Driver for Cadence QSPI Controller
 *
 * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>

#define CQSPI_NAME                      "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT            16

struct cqspi_st;

struct cqspi_flash_pdata {
        struct spi_nor  nor;
        struct cqspi_st *cqspi;
        u32             clk_rate;
        u32             read_delay;
        u32             tshsl_ns;
        u32             tsd2d_ns;
        u32             tchsh_ns;
        u32             tslch_ns;
        u8              inst_width;
        u8              addr_width;
        u8              data_width;
        u8              cs;
        bool            registered;
};

struct cqspi_st {
        struct platform_device  *pdev;

        struct clk              *clk;
        unsigned int            sclk;

        void __iomem            *iobase;
        void __iomem            *ahb_base;
        struct completion       transfer_complete;
        struct mutex            bus_mutex;

        int                     current_cs;
        int                     current_page_size;
        int                     current_erase_size;
        int                     current_addr_width;
        unsigned long           master_ref_clk_hz;
        bool                    is_decoded_cs;
        u32                     fifo_depth;
        u32                     fifo_width;
        u32                     trigger_address;
        struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS                        500
#define CQSPI_READ_TIMEOUT_MS                   10

/* Instruction type */
#define CQSPI_INST_TYPE_SINGLE                  0
#define CQSPI_INST_TYPE_DUAL                    1
#define CQSPI_INST_TYPE_QUAD                    2

#define CQSPI_DUMMY_CLKS_PER_BYTE               8
#define CQSPI_DUMMY_BYTES_MAX                   4
#define CQSPI_DUMMY_CLKS_MAX                    31

#define CQSPI_STIG_DATA_LEN_MAX                 8

/* Register map */
#define CQSPI_REG_CONFIG                        0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK            BIT(0)
#define CQSPI_REG_CONFIG_DECODE_MASK            BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB         10
#define CQSPI_REG_CONFIG_DMA_MASK               BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB               19
#define CQSPI_REG_CONFIG_IDLE_LSB               31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK        0xF
#define CQSPI_REG_CONFIG_BAUD_MASK              0xF

#define CQSPI_REG_RD_INSTR                      0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB           0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB       8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB        12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB        16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB          20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB            24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK      0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK       0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK       0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK           0x1F

#define CQSPI_REG_WR_INSTR                      0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB           0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB        12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB        16

#define CQSPI_REG_DELAY                         0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB               0
#define CQSPI_REG_DELAY_TCHSH_LSB               8
#define CQSPI_REG_DELAY_TSD2D_LSB               16
#define CQSPI_REG_DELAY_TSHSL_LSB               24
#define CQSPI_REG_DELAY_TSLCH_MASK              0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK              0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK              0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK              0xFF

#define CQSPI_REG_READCAPTURE                   0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB        0
#define CQSPI_REG_READCAPTURE_DELAY_LSB         1
#define CQSPI_REG_READCAPTURE_DELAY_MASK        0xF

#define CQSPI_REG_SIZE                          0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB              0
#define CQSPI_REG_SIZE_PAGE_LSB                 4
#define CQSPI_REG_SIZE_BLOCK_LSB                16
#define CQSPI_REG_SIZE_ADDRESS_MASK             0xF
#define CQSPI_REG_SIZE_PAGE_MASK                0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK               0x3F

#define CQSPI_REG_SRAMPARTITION                 0x18
#define CQSPI_REG_INDIRECTTRIGGER               0x1C

#define CQSPI_REG_DMA                           0x20
#define CQSPI_REG_DMA_SINGLE_LSB                0
#define CQSPI_REG_DMA_BURST_LSB                 8
#define CQSPI_REG_DMA_SINGLE_MASK               0xFF
#define CQSPI_REG_DMA_BURST_MASK                0xFF

#define CQSPI_REG_REMAP                         0x24
#define CQSPI_REG_MODE_BIT                      0x28

#define CQSPI_REG_SDRAMLEVEL                    0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB             0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB             16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK            0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK            0xFFFF

#define CQSPI_REG_IRQSTATUS                     0x40
#define CQSPI_REG_IRQMASK                       0x44

#define CQSPI_REG_INDIRECTRD                    0x60
#define CQSPI_REG_INDIRECTRD_START_MASK         BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK        BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK          BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK           0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR           0x68
#define CQSPI_REG_INDIRECTRDBYTES               0x6C

#define CQSPI_REG_CMDCTRL                       0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK          BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK       BIT(1)
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB          12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB             15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB         16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB           19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB          20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB             23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB            24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK         0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK        0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK         0x7

#define CQSPI_REG_INDIRECTWR                    0x70
#define CQSPI_REG_INDIRECTWR_START_MASK         BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK        BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK          BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK           0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR           0x78
#define CQSPI_REG_INDIRECTWRBYTES               0x7C

#define CQSPI_REG_CMDADDRESS                    0x94
#define CQSPI_REG_CMDREADDATALOWER              0xA0
#define CQSPI_REG_CMDREADDATAUPPER              0xA4
#define CQSPI_REG_CMDWRITEDATALOWER             0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER             0xAC

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR                  BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW                 BIT(1)
#define CQSPI_REG_IRQ_IND_COMP                  BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT             BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR          BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR           BIT(5)
#define CQSPI_REG_IRQ_WATERMARK                 BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL             BIT(12)

#define CQSPI_IRQ_MASK_RD               (CQSPI_REG_IRQ_WATERMARK        | \
                                         CQSPI_REG_IRQ_IND_SRAM_FULL    | \
                                         CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR               (CQSPI_REG_IRQ_IND_COMP         | \
                                         CQSPI_REG_IRQ_WATERMARK        | \
                                         CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK           0x1FFFF

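/*
 * Poll @reg until all bits in @mask read as set (or as cleared when
 * @clear is true), giving up after CQSPI_TIMEOUT_MS.
 */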
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
{
        unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
        u32 val;

        while (1) {
                val = readl(reg);
                if (clear)
                        val = ~val;
                val &= mask;

                if (val == mask)
                        return 0;

                if (time_after(jiffies, end))
                        return -ETIMEDOUT;
        }
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
        u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

        return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
}

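/* Return the number of read SRAM locations currently holding data. */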
static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
        u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

        reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
        return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
        struct cqspi_st *cqspi = dev;
        unsigned int irq_status;

        /* Read interrupt status */
        irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

        /* Clear interrupt */
        writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

        irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

        if (irq_status)
                complete(&cqspi->transfer_complete);

        return IRQ_HANDLED;
}

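/*
 * Compose the instruction, address and data transfer-width fields of the
 * read instruction register from the per-flash settings.
 */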
static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        u32 rdreg = 0;

        rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
        rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
        rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

        return rdreg;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
        const unsigned int poll_idle_retry = 3;
        unsigned int count = 0;
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
        while (1) {
                /*
                 * Read a few times in succession to ensure the controller
                 * is indeed idle, that is, the bit does not transition
                 * low again.
                 */
                if (cqspi_is_idle(cqspi))
                        count++;
                else
                        count = 0;

                if (count >= poll_idle_retry)
                        return 0;

                if (time_after(jiffies, timeout)) {
                        /* Timeout, in busy mode. */
                        dev_err(&cqspi->pdev->dev,
                                "QSPI is still busy after %dms timeout.\n",
                                CQSPI_TIMEOUT_MS);
                        return -ETIMEDOUT;
                }

                cpu_relax();
        }
}

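/*
 * Execute a STIG (software triggered instruction generator) command and
 * wait for both the command and the controller to become idle.
 */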
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
        void __iomem *reg_base = cqspi->iobase;
        int ret;

        /* Write the CMDCTRL without starting execution. */
        writel(reg, reg_base + CQSPI_REG_CMDCTRL);
        /* Start execution. */
        reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
        writel(reg, reg_base + CQSPI_REG_CMDCTRL);

        /* Polling for completion. */
        ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
                                 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
        if (ret) {
                dev_err(&cqspi->pdev->dev,
                        "Flash command execution timed out.\n");
                return ret;
        }

        /* Polling QSPI idle status. */
        return cqspi_wait_idle(cqspi);
}

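/* Read up to CQSPI_STIG_DATA_LEN_MAX bytes of register data via STIG. */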
static int cqspi_command_read(struct spi_nor *nor,
                              const u8 *txbuf, const unsigned n_tx,
                              u8 *rxbuf, const unsigned n_rx)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int rdreg;
        unsigned int reg;
        unsigned int read_len;
        int status;

        if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
                dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
                        n_rx, rxbuf);
                return -EINVAL;
        }

        reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

        rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
        writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

        reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

        /* 0 means 1 byte. */
        reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
                << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
        status = cqspi_exec_flash_cmd(cqspi, reg);
        if (status)
                return status;

        reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

        /* Put the read value into rx_buf */
        read_len = (n_rx > 4) ? 4 : n_rx;
        memcpy(rxbuf, &reg, read_len);
        rxbuf += read_len;

        if (n_rx > 4) {
                reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

                read_len = n_rx - read_len;
                memcpy(rxbuf, &reg, read_len);
        }

        return 0;
}

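/* Write a command plus up to four bytes of data via STIG. */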
static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
                               const u8 *txbuf, const unsigned n_tx)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int reg;
        unsigned int data;
        int ret;

        if (n_tx > 4 || (n_tx && !txbuf)) {
                dev_err(nor->dev,
                        "Invalid input argument, cmdlen %d txbuf 0x%p\n",
                        n_tx, txbuf);
                return -EINVAL;
        }

        reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
        if (n_tx) {
                reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
                reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
                        << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
                data = 0;
                memcpy(&data, txbuf, n_tx);
                writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
        }

        ret = cqspi_exec_flash_cmd(cqspi, reg);
        return ret;
}

static int cqspi_command_write_addr(struct spi_nor *nor,
                                    const u8 opcode, const unsigned int addr)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int reg;

        reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
        reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
        reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
                << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

        writel(addr, reg_base + CQSPI_REG_CMDADDRESS);

        return cqspi_exec_flash_cmd(cqspi, reg);
}

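/*
 * Program the read opcode, transfer widths, dummy cycles and address
 * width used by the indirect read controller.
 */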
static int cqspi_indirect_read_setup(struct spi_nor *nor,
                                     const unsigned int from_addr)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int dummy_clk = 0;
        unsigned int reg;

        writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);

        reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
        reg |= cqspi_calc_rdreg(nor, nor->read_opcode);

        /* Setup dummy clock cycles */
        dummy_clk = nor->read_dummy;
        if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
                dummy_clk = CQSPI_DUMMY_CLKS_MAX;

        if (dummy_clk / 8) {
                reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
                /* Set mode bits high to ensure chip doesn't enter XIP */
                writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);

                /* Need to subtract the mode byte (8 clocks). */
                if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
                        dummy_clk -= 8;

                if (dummy_clk)
                        reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
                               << CQSPI_REG_RD_INSTR_DUMMY_LSB;
        }

        writel(reg, reg_base + CQSPI_REG_RD_INSTR);

        /* Set address width */
        reg = readl(reg_base + CQSPI_REG_SIZE);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
        reg |= (nor->addr_width - 1);
        writel(reg, reg_base + CQSPI_REG_SIZE);
        return 0;
}

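/*
 * Drain the read SRAM through the AHB window as the indirect read
 * controller fills it, until all requested bytes have been copied.
 */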
static int cqspi_indirect_read_execute(struct spi_nor *nor,
                                       u8 *rxbuf, const unsigned n_rx)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        void __iomem *ahb_base = cqspi->ahb_base;
        unsigned int remaining = n_rx;
        unsigned int bytes_to_read = 0;
        int ret = 0;

        writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

        /* Clear all interrupts. */
        writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

        writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

        reinit_completion(&cqspi->transfer_complete);
        writel(CQSPI_REG_INDIRECTRD_START_MASK,
               reg_base + CQSPI_REG_INDIRECTRD);

        while (remaining > 0) {
                ret = wait_for_completion_timeout(&cqspi->transfer_complete,
                                                  msecs_to_jiffies
                                                  (CQSPI_READ_TIMEOUT_MS));

                bytes_to_read = cqspi_get_rd_sram_level(cqspi);

                if (!ret && bytes_to_read == 0) {
                        dev_err(nor->dev, "Indirect read timeout, no bytes\n");
                        ret = -ETIMEDOUT;
                        goto failrd;
                }

                while (bytes_to_read != 0) {
                        bytes_to_read *= cqspi->fifo_width;
                        bytes_to_read = bytes_to_read > remaining ?
                                        remaining : bytes_to_read;
                        readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
                        rxbuf += bytes_to_read;
                        remaining -= bytes_to_read;
                        bytes_to_read = cqspi_get_rd_sram_level(cqspi);
                }

                if (remaining > 0)
                        reinit_completion(&cqspi->transfer_complete);
        }

        /* Check indirect done status */
        ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
                                 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
        if (ret) {
                dev_err(nor->dev,
                        "Indirect read completion error (%i)\n", ret);
                goto failrd;
        }

        /* Disable interrupt */
        writel(0, reg_base + CQSPI_REG_IRQMASK);

        /* Clear indirect completion status */
        writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

        return 0;

failrd:
        /* Disable interrupt */
        writel(0, reg_base + CQSPI_REG_IRQMASK);

        /* Cancel the indirect read */
        writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
               reg_base + CQSPI_REG_INDIRECTRD);
        return ret;
}

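/*
 * Program the program opcode, start address and address width used by
 * the indirect write controller.
 */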
static int cqspi_indirect_write_setup(struct spi_nor *nor,
                                      const unsigned int to_addr)
{
        unsigned int reg;
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;

        /* Set opcode. */
        reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
        writel(reg, reg_base + CQSPI_REG_WR_INSTR);
        reg = cqspi_calc_rdreg(nor, nor->program_opcode);
        writel(reg, reg_base + CQSPI_REG_RD_INSTR);

        writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);

        reg = readl(reg_base + CQSPI_REG_SIZE);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
        reg |= (nor->addr_width - 1);
        writel(reg, reg_base + CQSPI_REG_SIZE);
        return 0;
}

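/*
 * Feed data into the write SRAM through the AHB window one page at a
 * time, waiting for the controller to signal progress between pages.
 */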
static int cqspi_indirect_write_execute(struct spi_nor *nor,
                                        const u8 *txbuf, const unsigned n_tx)
{
        const unsigned int page_size = nor->page_size;
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int remaining = n_tx;
        unsigned int write_bytes;
        int ret;

        writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

        /* Clear all interrupts. */
        writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

        writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

        reinit_completion(&cqspi->transfer_complete);
        writel(CQSPI_REG_INDIRECTWR_START_MASK,
               reg_base + CQSPI_REG_INDIRECTWR);

        while (remaining > 0) {
                write_bytes = remaining > page_size ? page_size : remaining;
                writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));

                ret = wait_for_completion_timeout(&cqspi->transfer_complete,
                                                  msecs_to_jiffies
                                                  (CQSPI_TIMEOUT_MS));
                if (!ret) {
                        dev_err(nor->dev, "Indirect write timeout\n");
                        ret = -ETIMEDOUT;
                        goto failwr;
                }

                txbuf += write_bytes;
                remaining -= write_bytes;

                if (remaining > 0)
                        reinit_completion(&cqspi->transfer_complete);
        }

        /* Check indirect done status */
        ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
                                 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
        if (ret) {
                dev_err(nor->dev,
                        "Indirect write completion error (%i)\n", ret);
                goto failwr;
        }

        /* Disable interrupt. */
        writel(0, reg_base + CQSPI_REG_IRQMASK);

        /* Clear indirect completion status */
        writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

        cqspi_wait_idle(cqspi);

        return 0;

failwr:
        /* Disable interrupt. */
        writel(0, reg_base + CQSPI_REG_IRQMASK);

        /* Cancel the indirect write */
        writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
               reg_base + CQSPI_REG_INDIRECTWR);
        return ret;
}

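/*
 * Select the flash device, either through the external decoder or by
 * driving the matching active-low chip-select pattern directly.
 */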
static void cqspi_chipselect(struct spi_nor *nor)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *reg_base = cqspi->iobase;
        unsigned int chip_select = f_pdata->cs;
        unsigned int reg;

        reg = readl(reg_base + CQSPI_REG_CONFIG);
        if (cqspi->is_decoded_cs) {
                reg |= CQSPI_REG_CONFIG_DECODE_MASK;
        } else {
                reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

                /* Convert CS if without decoder.
                 * CS0 to 4b'1110
                 * CS1 to 4b'1101
                 * CS2 to 4b'1011
                 * CS3 to 4b'0111
                 */
                chip_select = 0xF & ~(1 << chip_select);
        }

        reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
                 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
        reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
            << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
        writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *iobase = cqspi->iobase;
        unsigned int reg;

        /* configure page size and block size. */
        reg = readl(iobase + CQSPI_REG_SIZE);
        reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
        reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
        reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
        reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
        reg |= (nor->addr_width - 1);
        writel(reg, iobase + CQSPI_REG_SIZE);

        /* configure the chip select */
        cqspi_chipselect(nor);

        /* Store the new configuration of the controller */
        cqspi->current_page_size = nor->page_size;
        cqspi->current_erase_size = nor->mtd.erasesize;
        cqspi->current_addr_width = nor->addr_width;
}

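/* Convert a delay in nanoseconds into reference-clock ticks, rounding up. */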
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
                                           const unsigned int ns_val)
{
        unsigned int ticks;

        ticks = ref_clk_hz / 1000;      /* kHz */
        ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

        return ticks;
}

static void cqspi_delay(struct spi_nor *nor)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        void __iomem *iobase = cqspi->iobase;
        const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
        unsigned int tshsl, tchsh, tslch, tsd2d;
        unsigned int reg;
        unsigned int tsclk;

        /* calculate the number of ref ticks for one sclk tick */
        tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

        tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
        /* this particular value must be at least one sclk */
        if (tshsl < tsclk)
                tshsl = tsclk;

        tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
        tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
        tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

        reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
               << CQSPI_REG_DELAY_TSHSL_LSB;
        reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
                << CQSPI_REG_DELAY_TCHSH_LSB;
        reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
                << CQSPI_REG_DELAY_TSLCH_LSB;
        reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
                << CQSPI_REG_DELAY_TSD2D_LSB;
        writel(reg, iobase + CQSPI_REG_DELAY);
}

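/*
 * Set the baud-rate divider so the serial clock does not exceed the
 * requested rate: sclk_out = ref_clk / (2 * (div + 1)).
 */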
static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
        const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
        void __iomem *reg_base = cqspi->iobase;
        u32 reg, div;

        /* Recalculate the baudrate divisor based on QSPI specification. */
        div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

        reg = readl(reg_base + CQSPI_REG_CONFIG);
        reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
        reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
        writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
                                   const unsigned int bypass,
                                   const unsigned int delay)
{
        void __iomem *reg_base = cqspi->iobase;
        unsigned int reg;

        reg = readl(reg_base + CQSPI_REG_READCAPTURE);

        if (bypass)
                reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
        else
                reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

        reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
                 << CQSPI_REG_READCAPTURE_DELAY_LSB);

        reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
                << CQSPI_REG_READCAPTURE_DELAY_LSB;

        writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
        void __iomem *reg_base = cqspi->iobase;
        unsigned int reg;

        reg = readl(reg_base + CQSPI_REG_CONFIG);

        if (enable)
                reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
        else
                reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

        writel(reg, reg_base + CQSPI_REG_CONFIG);
}

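/*
 * Re-program chip select, sizes, baud-rate divider and delays, but only
 * when the target flash or clock rate differs from the current setup.
 */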
static void cqspi_configure(struct spi_nor *nor)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;
        const unsigned int sclk = f_pdata->clk_rate;
        int switch_cs = (cqspi->current_cs != f_pdata->cs);
        int switch_ck = (cqspi->sclk != sclk);

        if ((cqspi->current_page_size != nor->page_size) ||
            (cqspi->current_erase_size != nor->mtd.erasesize) ||
            (cqspi->current_addr_width != nor->addr_width))
                switch_cs = 1;

        if (switch_cs || switch_ck)
                cqspi_controller_enable(cqspi, 0);

        /* Switch chip select. */
        if (switch_cs) {
                cqspi->current_cs = f_pdata->cs;
                cqspi_configure_cs_and_sizes(nor);
        }

        /* Setup baudrate divisor and delays */
        if (switch_ck) {
                cqspi->sclk = sclk;
                cqspi_config_baudrate_div(cqspi);
                cqspi_delay(nor);
                cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
        }

        if (switch_cs || switch_ck)
                cqspi_controller_enable(cqspi, 1);
}

static int cqspi_set_protocol(struct spi_nor *nor, const int read)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;

        f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;

        if (read) {
                switch (nor->flash_read) {
                case SPI_NOR_NORMAL:
                case SPI_NOR_FAST:
                        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
                        break;
                case SPI_NOR_DUAL:
                        f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
                        break;
                case SPI_NOR_QUAD:
                        f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
                        break;
                default:
                        return -EINVAL;
                }
        }

        cqspi_configure(nor);

        return 0;
}

static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
                           size_t len, const u_char *buf)
{
        int ret;

        ret = cqspi_set_protocol(nor, 0);
        if (ret)
                return ret;

        ret = cqspi_indirect_write_setup(nor, to);
        if (ret)
                return ret;

        ret = cqspi_indirect_write_execute(nor, buf, len);
        if (ret)
                return ret;

        return (ret < 0) ? ret : len;
}

static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
                          size_t len, u_char *buf)
{
        int ret;

        ret = cqspi_set_protocol(nor, 1);
        if (ret)
                return ret;

        ret = cqspi_indirect_read_setup(nor, from);
        if (ret)
                return ret;

        ret = cqspi_indirect_read_execute(nor, buf, len);
        if (ret)
                return ret;

        return (ret < 0) ? ret : len;
}

static int cqspi_erase(struct spi_nor *nor, loff_t offs)
{
        int ret;

        ret = cqspi_set_protocol(nor, 0);
        if (ret)
                return ret;

        /* Send write enable, then erase commands. */
        ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
        if (ret)
                return ret;

        /* Set up command buffer. */
        ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
        if (ret)
                return ret;

        return 0;
}

static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;

        mutex_lock(&cqspi->bus_mutex);

        return 0;
}

static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
        struct cqspi_flash_pdata *f_pdata = nor->priv;
        struct cqspi_st *cqspi = f_pdata->cqspi;

        mutex_unlock(&cqspi->bus_mutex);
}

static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
        int ret;

        ret = cqspi_set_protocol(nor, 0);
        if (!ret)
                ret = cqspi_command_read(nor, &opcode, 1, buf, len);

        return ret;
}

static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
        int ret;

        ret = cqspi_set_protocol(nor, 0);
        if (!ret)
                ret = cqspi_command_write(nor, opcode, buf, len);

        return ret;
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
                                    struct cqspi_flash_pdata *f_pdata,
                                    struct device_node *np)
{
        if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
                dev_err(&pdev->dev, "couldn't determine read-delay\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
                dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
                dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
                dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
                dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
                dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
                return -ENXIO;
        }

        return 0;
}

static int cqspi_of_get_pdata(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct cqspi_st *cqspi = platform_get_drvdata(pdev);

        cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

        if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
                dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
                dev_err(&pdev->dev, "couldn't determine fifo-width\n");
                return -ENXIO;
        }

        if (of_property_read_u32(np, "cdns,trigger-address",
                                 &cqspi->trigger_address)) {
                dev_err(&pdev->dev, "couldn't determine trigger-address\n");
                return -ENXIO;
        }

        return 0;
}

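/* One-time controller setup: SRAM partition, trigger address and watermarks. */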
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
        cqspi_controller_enable(cqspi, 0);

        /* Configure the remap address register, no remap */
        writel(0, cqspi->iobase + CQSPI_REG_REMAP);

        /* Disable all interrupts. */
        writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

        /* Configure the SRAM split to 1:1. */
        writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

        /* Load indirect trigger address. */
        writel(cqspi->trigger_address,
               cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

        /* Program read watermark -- 1/2 of the FIFO. */
        writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
               cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
        /* Program write watermark -- 1/8 of the FIFO. */
        writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
               cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

        cqspi_controller_enable(cqspi, 1);
}

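/*
 * Walk the controller's flash child nodes, registering one spi_nor/mtd
 * device per chip select; on failure, unregister anything already added.
 */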
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
        struct platform_device *pdev = cqspi->pdev;
        struct device *dev = &pdev->dev;
        struct cqspi_flash_pdata *f_pdata;
        struct spi_nor *nor;
        struct mtd_info *mtd;
        unsigned int cs;
        int i, ret;

        /* Get flash device data */
        for_each_available_child_of_node(dev->of_node, np) {
                if (of_property_read_u32(np, "reg", &cs)) {
                        dev_err(dev, "Couldn't determine chip select.\n");
                        ret = -ENXIO;
                        goto err;
                }

                if (cs >= CQSPI_MAX_CHIPSELECT) {
                        dev_err(dev, "Chip select %d out of range.\n", cs);
                        ret = -EINVAL;
                        goto err;
                }

                f_pdata = &cqspi->f_pdata[cs];
                f_pdata->cqspi = cqspi;
                f_pdata->cs = cs;

                ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
                if (ret)
                        goto err;

                nor = &f_pdata->nor;
                mtd = &nor->mtd;

                mtd->priv = nor;

                nor->dev = dev;
                spi_nor_set_flash_node(nor, np);
                nor->priv = f_pdata;

                nor->read_reg = cqspi_read_reg;
                nor->write_reg = cqspi_write_reg;
                nor->read = cqspi_read;
                nor->write = cqspi_write;
                nor->erase = cqspi_erase;
                nor->prepare = cqspi_prep;
                nor->unprepare = cqspi_unprep;

                mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
                                           dev_name(dev), cs);
                if (!mtd->name) {
                        ret = -ENOMEM;
                        goto err;
                }

                ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
                if (ret)
                        goto err;

                ret = mtd_device_register(mtd, NULL, 0);
                if (ret)
                        goto err;

                f_pdata->registered = true;
        }

        return 0;

err:
        for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
                if (cqspi->f_pdata[i].registered)
                        mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
        return ret;
}

static int cqspi_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct cqspi_st *cqspi;
        struct resource *res;
        struct resource *res_ahb;
        int ret;
        int irq;

        cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
        if (!cqspi)
                return -ENOMEM;

        mutex_init(&cqspi->bus_mutex);
        cqspi->pdev = pdev;
        platform_set_drvdata(pdev, cqspi);

        /* Obtain configuration from OF. */
        ret = cqspi_of_get_pdata(pdev);
        if (ret) {
                dev_err(dev, "Cannot get mandatory OF data.\n");
                return -ENODEV;
        }

        /* Obtain QSPI clock. */
        cqspi->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(cqspi->clk)) {
                dev_err(dev, "Cannot claim QSPI clock.\n");
                return PTR_ERR(cqspi->clk);
        }

        /* Obtain and remap controller address. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cqspi->iobase = devm_ioremap_resource(dev, res);
        if (IS_ERR(cqspi->iobase)) {
                dev_err(dev, "Cannot remap controller address.\n");
                return PTR_ERR(cqspi->iobase);
        }

        /* Obtain and remap AHB address. */
        res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
        if (IS_ERR(cqspi->ahb_base)) {
                dev_err(dev, "Cannot remap AHB address.\n");
                return PTR_ERR(cqspi->ahb_base);
        }

        init_completion(&cqspi->transfer_complete);

        /* Obtain IRQ line. */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "Cannot obtain IRQ.\n");
                return -ENXIO;
        }

        ret = clk_prepare_enable(cqspi->clk);
        if (ret) {
                dev_err(dev, "Cannot enable QSPI clock.\n");
                return ret;
        }

        cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);

        ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
                               pdev->name, cqspi);
        if (ret) {
                dev_err(dev, "Cannot request IRQ.\n");
                goto probe_irq_failed;
        }

        cqspi_wait_idle(cqspi);
        cqspi_controller_init(cqspi);
        cqspi->current_cs = -1;
        cqspi->sclk = 0;

        ret = cqspi_setup_flash(cqspi, np);
        if (ret) {
                dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
                goto probe_setup_failed;
        }

        return ret;
probe_irq_failed:
        cqspi_controller_enable(cqspi, 0);
probe_setup_failed:
        clk_disable_unprepare(cqspi->clk);
        return ret;
}

static int cqspi_remove(struct platform_device *pdev)
{
        struct cqspi_st *cqspi = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
                if (cqspi->f_pdata[i].registered)
                        mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);

        cqspi_controller_enable(cqspi, 0);

        clk_disable_unprepare(cqspi->clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
        struct cqspi_st *cqspi = dev_get_drvdata(dev);

        cqspi_controller_enable(cqspi, 0);
        return 0;
}

static int cqspi_resume(struct device *dev)
{
        struct cqspi_st *cqspi = dev_get_drvdata(dev);

        cqspi_controller_enable(cqspi, 1);
        return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
        .suspend = cqspi_suspend,
        .resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS        (&cqspi__dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS        NULL
#endif

static const struct of_device_id cqspi_dt_ids[] = {
        {.compatible = "cdns,qspi-nor",},
        { /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
        .probe = cqspi_probe,
        .remove = cqspi_remove,
        .driver = {
                .name = CQSPI_NAME,
                .pm = CQSPI_DEV_PM_OPS,
                .of_match_table = cqspi_dt_ids,
        },
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");