drivers/mmc/host/sh_mmcif.c
1 /*
2  * MMCIF eMMC driver.
3  *
4  * Copyright (C) 2010 Renesas Solutions Corp.
5  * Yusuke Goda <yusuke.goda.sx@renesas.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License.
10  *
11  *
12  * TODO
13  *  1. DMA (improve the basic support implemented below)
14  *  2. Power management
15  *  3. Handle MMC errors better
16  *
17  */
18
19 /*
20  * The MMCIF driver processes MMC requests asynchronously, as required by
21  * the Linux MMC API.
22  *
23  * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
24  * data, and optional stop. To achieve asynchronous processing each of these
25  * stages is split into two halves: a top and a bottom half. The top half
26  * initialises the hardware, installs a timeout handler to handle completion
27  * timeouts, and returns. For the command stage, this immediately returns
28  * control to the caller, leaving all further processing to run asynchronously.
29  * All further request processing is performed by the bottom halves.
30  *
31  * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
32  * thread, a DMA completion callback (if DMA is used), a timeout work, and
33  * request- and stage-specific handler methods.
34  *
35  * Each bottom half run begins with either a hardware interrupt, a DMA callback
36  * invocation, or a timeout work run. In case of an error or a successful
37  * processing completion, the MMC core is informed and the request processing is
38  * finished. In case processing has to continue, i.e., if data has to be read
39  * from or written to the card, or if a stop command has to be sent, the next
40  * top half is called, which performs the necessary hardware handling and
41  * reschedules the timeout work. This returns the driver state machine to
42  * the bottom-half waiting state.
43  */
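
/*
 * A rough sketch of one request's life cycle (editorial illustration; the
 * function names match the code below, the numbered steps do not):
 *
 *  1. sh_mmcif_request()   - top half: state -> STATE_REQUEST, then
 *     sh_mmcif_start_cmd() programs CE_ARG/CE_CMD_SET and arms timeout_work
 *  2. sh_mmcif_intr()      - hard IRQ: acknowledges CE_INT, flags errors,
 *     returns IRQ_WAKE_THREAD
 *  3. sh_mmcif_irqt()      - IRQ thread: runs the handler for host->wait_for
 *     (sh_mmcif_end_cmd(), sh_mmcif_mread_block(), ...), then either re-arms
 *     timeout_work and keeps waiting, or
 *  4. mmc_request_done()   - state -> STATE_IDLE, the request is finished
 */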
44
45 #include <linux/bitops.h>
46 #include <linux/clk.h>
47 #include <linux/completion.h>
48 #include <linux/delay.h>
49 #include <linux/dma-mapping.h>
50 #include <linux/dmaengine.h>
51 #include <linux/mmc/card.h>
52 #include <linux/mmc/core.h>
53 #include <linux/mmc/host.h>
54 #include <linux/mmc/mmc.h>
55 #include <linux/mmc/sdio.h>
56 #include <linux/mmc/sh_mmcif.h>
57 #include <linux/mmc/slot-gpio.h>
58 #include <linux/mod_devicetable.h>
59 #include <linux/mutex.h>
60 #include <linux/pagemap.h>
61 #include <linux/platform_device.h>
62 #include <linux/pm_qos.h>
63 #include <linux/pm_runtime.h>
64 #include <linux/spinlock.h>
65 #include <linux/module.h>
66
67 #define DRIVER_NAME     "sh_mmcif"
68 #define DRIVER_VERSION  "2010-04-28"
69
70 /* CE_CMD_SET */
71 #define CMD_MASK                0x3f000000
72 #define CMD_SET_RTYP_NO         ((0 << 23) | (0 << 22))
73 #define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
74 #define CMD_SET_RTYP_17B        ((1 << 23) | (0 << 22)) /* R2 */
75 #define CMD_SET_RBSY            (1 << 21) /* R1b */
76 #define CMD_SET_CCSEN           (1 << 20)
77 #define CMD_SET_WDAT            (1 << 19) /* 1: on data, 0: no data */
78 #define CMD_SET_DWEN            (1 << 18) /* 1: write, 0: read */
79 #define CMD_SET_CMLTE           (1 << 17) /* 1: multi block trans, 0: single */
80 #define CMD_SET_CMD12EN         (1 << 16) /* 1: CMD12 auto issue */
81 #define CMD_SET_RIDXC_INDEX     ((0 << 15) | (0 << 14)) /* index check */
82 #define CMD_SET_RIDXC_BITS      ((0 << 15) | (1 << 14)) /* check bits check */
83 #define CMD_SET_RIDXC_NO        ((1 << 15) | (0 << 14)) /* no check */
84 #define CMD_SET_CRC7C           ((0 << 13) | (0 << 12)) /* CRC7 check */
85 #define CMD_SET_CRC7C_BITS      ((0 << 13) | (1 << 12)) /* check bits check */
86 #define CMD_SET_CRC7C_INTERNAL  ((1 << 13) | (0 << 12)) /* internal CRC7 check */
87 #define CMD_SET_CRC16C          (1 << 10) /* 0: CRC16 check */
88 #define CMD_SET_CRCSTE          (1 << 8) /* 1: not receive CRC status */
89 #define CMD_SET_TBIT            (1 << 7) /* 1: transmission bit "Low" */
90 #define CMD_SET_OPDM            (1 << 6) /* 1: open/drain */
91 #define CMD_SET_CCSH            (1 << 5)
92 #define CMD_SET_DARS            (1 << 2) /* Dual Data Rate */
93 #define CMD_SET_DATW_1          ((0 << 1) | (0 << 0)) /* 1bit */
94 #define CMD_SET_DATW_4          ((0 << 1) | (1 << 0)) /* 4bit */
95 #define CMD_SET_DATW_8          ((1 << 1) | (0 << 0)) /* 8bit */
96
97 /* CE_CMD_CTRL */
98 #define CMD_CTRL_BREAK          (1 << 0)
99
100 /* CE_BLOCK_SET */
101 #define BLOCK_SIZE_MASK         0x0000ffff
102
103 /* CE_INT */
104 #define INT_CCSDE               (1 << 29)
105 #define INT_CMD12DRE            (1 << 26)
106 #define INT_CMD12RBE            (1 << 25)
107 #define INT_CMD12CRE            (1 << 24)
108 #define INT_DTRANE              (1 << 23)
109 #define INT_BUFRE               (1 << 22)
110 #define INT_BUFWEN              (1 << 21)
111 #define INT_BUFREN              (1 << 20)
112 #define INT_CCSRCV              (1 << 19)
113 #define INT_RBSYE               (1 << 17)
114 #define INT_CRSPE               (1 << 16)
115 #define INT_CMDVIO              (1 << 15)
116 #define INT_BUFVIO              (1 << 14)
117 #define INT_WDATERR             (1 << 11)
118 #define INT_RDATERR             (1 << 10)
119 #define INT_RIDXERR             (1 << 9)
120 #define INT_RSPERR              (1 << 8)
121 #define INT_CCSTO               (1 << 5)
122 #define INT_CRCSTO              (1 << 4)
123 #define INT_WDATTO              (1 << 3)
124 #define INT_RDATTO              (1 << 2)
125 #define INT_RBSYTO              (1 << 1)
126 #define INT_RSPTO               (1 << 0)
127 #define INT_ERR_STS             (INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
128                                  INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129                                  INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
130                                  INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132 #define INT_ALL                 (INT_RBSYE | INT_CRSPE | INT_BUFREN |    \
133                                  INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
134                                  INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
135
136 /* CE_INT_MASK */
137 #define MASK_ALL                0x00000000
138 #define MASK_MCCSDE             (1 << 29)
139 #define MASK_MCMD12DRE          (1 << 26)
140 #define MASK_MCMD12RBE          (1 << 25)
141 #define MASK_MCMD12CRE          (1 << 24)
142 #define MASK_MDTRANE            (1 << 23)
143 #define MASK_MBUFRE             (1 << 22)
144 #define MASK_MBUFWEN            (1 << 21)
145 #define MASK_MBUFREN            (1 << 20)
146 #define MASK_MCCSRCV            (1 << 19)
147 #define MASK_MRBSYE             (1 << 17)
148 #define MASK_MCRSPE             (1 << 16)
149 #define MASK_MCMDVIO            (1 << 15)
150 #define MASK_MBUFVIO            (1 << 14)
151 #define MASK_MWDATERR           (1 << 11)
152 #define MASK_MRDATERR           (1 << 10)
153 #define MASK_MRIDXERR           (1 << 9)
154 #define MASK_MRSPERR            (1 << 8)
155 #define MASK_MCCSTO             (1 << 5)
156 #define MASK_MCRCSTO            (1 << 4)
157 #define MASK_MWDATTO            (1 << 3)
158 #define MASK_MRDATTO            (1 << 2)
159 #define MASK_MRBSYTO            (1 << 1)
160 #define MASK_MRSPTO             (1 << 0)
161
162 #define MASK_START_CMD          (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
163                                  MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
164                                  MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
165                                  MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
166
167 #define MASK_CLEAN              (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |      \
168                                  MASK_MBUFREN | MASK_MBUFWEN |                  \
169                                  MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |  \
170                                  MASK_MCMD12RBE | MASK_MCMD12CRE)
171
172 /* CE_HOST_STS1 */
173 #define STS1_CMDSEQ             (1 << 31)
174
175 /* CE_HOST_STS2 */
176 #define STS2_CRCSTE             (1 << 31)
177 #define STS2_CRC16E             (1 << 30)
178 #define STS2_AC12CRCE           (1 << 29)
179 #define STS2_RSPCRC7E           (1 << 28)
180 #define STS2_CRCSTEBE           (1 << 27)
181 #define STS2_RDATEBE            (1 << 26)
182 #define STS2_AC12REBE           (1 << 25)
183 #define STS2_RSPEBE             (1 << 24)
184 #define STS2_AC12IDXE           (1 << 23)
185 #define STS2_RSPIDXE            (1 << 22)
186 #define STS2_CCSTO              (1 << 15)
187 #define STS2_RDATTO             (1 << 14)
188 #define STS2_DATBSYTO           (1 << 13)
189 #define STS2_CRCSTTO            (1 << 12)
190 #define STS2_AC12BSYTO          (1 << 11)
191 #define STS2_RSPBSYTO           (1 << 10)
192 #define STS2_AC12RSPTO          (1 << 9)
193 #define STS2_RSPTO              (1 << 8)
194 #define STS2_CRC_ERR            (STS2_CRCSTE | STS2_CRC16E |            \
195                                  STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
196 #define STS2_TIMEOUT_ERR        (STS2_CCSTO | STS2_RDATTO |             \
197                                  STS2_DATBSYTO | STS2_CRCSTTO |         \
198                                  STS2_AC12BSYTO | STS2_RSPBSYTO |       \
199                                  STS2_AC12RSPTO | STS2_RSPTO)
200
201 #define CLKDEV_EMMC_DATA        52000000 /* 52MHz */
202 #define CLKDEV_MMC_DATA         20000000 /* 20MHz */
203 #define CLKDEV_INIT             400000   /* 400 kHz */
204
205 enum mmcif_state {
206         STATE_IDLE,
207         STATE_REQUEST,
208         STATE_IOS,
209         STATE_TIMEOUT,
210 };
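
/*
 * State transitions, as implemented below (editorial summary): STATE_IDLE ->
 * STATE_REQUEST in sh_mmcif_request() and STATE_IDLE -> STATE_IOS in
 * sh_mmcif_set_ios(), each returning to STATE_IDLE on completion; any
 * non-idle state -> STATE_TIMEOUT in mmcif_timeout_work().
 */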
211
212 enum mmcif_wait_for {
213         MMCIF_WAIT_FOR_REQUEST,
214         MMCIF_WAIT_FOR_CMD,
215         MMCIF_WAIT_FOR_MREAD,
216         MMCIF_WAIT_FOR_MWRITE,
217         MMCIF_WAIT_FOR_READ,
218         MMCIF_WAIT_FOR_WRITE,
219         MMCIF_WAIT_FOR_READ_END,
220         MMCIF_WAIT_FOR_WRITE_END,
221         MMCIF_WAIT_FOR_STOP,
222 };
223
224 struct sh_mmcif_host {
225         struct mmc_host *mmc;
226         struct mmc_request *mrq;
227         struct platform_device *pd;
228         struct clk *hclk;
229         unsigned int clk;
230         int bus_width;
231         unsigned char timing;
232         bool sd_error;
233         bool dying;
234         long timeout;
235         void __iomem *addr;
236         u32 *pio_ptr;
237         spinlock_t lock;                /* protect sh_mmcif_host::state */
238         enum mmcif_state state;
239         enum mmcif_wait_for wait_for;
240         struct delayed_work timeout_work;
241         size_t blocksize;
242         int sg_idx;
243         int sg_blkidx;
244         bool power;
245         bool card_present;
246         struct mutex thread_lock;
247
248         /* DMA support */
249         struct dma_chan         *chan_rx;
250         struct dma_chan         *chan_tx;
251         struct completion       dma_complete;
252         bool                    dma_active;
253 };
254
255 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
256                                         unsigned int reg, u32 val)
257 {
258         writel(val | readl(host->addr + reg), host->addr + reg);
259 }
260
261 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
262                                         unsigned int reg, u32 val)
263 {
264         writel(~val & readl(host->addr + reg), host->addr + reg);
265 }
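
/*
 * Both helpers are plain, non-atomic read-modify-write accessors; callers
 * rely on the driver's own serialisation. Typical use, as seen throughout
 * this file, e.g. to unmask the buffer-read interrupt:
 *
 *	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 */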
266
267 static void mmcif_dma_complete(void *arg)
268 {
269         struct sh_mmcif_host *host = arg;
270         struct mmc_request *mrq = host->mrq;
271
272         dev_dbg(&host->pd->dev, "Command completed\n");
273
274         if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
275                  dev_name(&host->pd->dev)))
276                 return;
277
278         complete(&host->dma_complete);
279 }
280
281 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
282 {
283         struct mmc_data *data = host->mrq->data;
284         struct scatterlist *sg = data->sg;
285         struct dma_async_tx_descriptor *desc = NULL;
286         struct dma_chan *chan = host->chan_rx;
287         dma_cookie_t cookie = -EINVAL;
288         int ret;
289
290         ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
291                          DMA_FROM_DEVICE);
292         if (ret > 0) {
293                 host->dma_active = true;
294                 desc = dmaengine_prep_slave_sg(chan, sg, ret,
295                         DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
296         }
297
298         if (desc) {
299                 desc->callback = mmcif_dma_complete;
300                 desc->callback_param = host;
301                 cookie = dmaengine_submit(desc);
302                 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
303                 dma_async_issue_pending(chan);
304         }
305         dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
306                 __func__, data->sg_len, ret, cookie);
307
308         if (!desc) {
309                 /* DMA failed, fall back to PIO */
310                 if (ret >= 0)
311                         ret = -EIO;
312                 host->chan_rx = NULL;
313                 host->dma_active = false;
314                 dma_release_channel(chan);
315                 /* Free the Tx channel too */
316                 chan = host->chan_tx;
317                 if (chan) {
318                         host->chan_tx = NULL;
319                         dma_release_channel(chan);
320                 }
321                 dev_warn(&host->pd->dev,
322                          "DMA failed: %d, falling back to PIO\n", ret);
323                 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
324         }
325
326         dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
327                 desc, cookie, data->sg_len);
328 }
329
330 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
331 {
332         struct mmc_data *data = host->mrq->data;
333         struct scatterlist *sg = data->sg;
334         struct dma_async_tx_descriptor *desc = NULL;
335         struct dma_chan *chan = host->chan_tx;
336         dma_cookie_t cookie = -EINVAL;
337         int ret;
338
339         ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
340                          DMA_TO_DEVICE);
341         if (ret > 0) {
342                 host->dma_active = true;
343                 desc = dmaengine_prep_slave_sg(chan, sg, ret,
344                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
345         }
346
347         if (desc) {
348                 desc->callback = mmcif_dma_complete;
349                 desc->callback_param = host;
350                 cookie = dmaengine_submit(desc);
351                 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
352                 dma_async_issue_pending(chan);
353         }
354         dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
355                 __func__, data->sg_len, ret, cookie);
356
357         if (!desc) {
358                 /* DMA failed, fall back to PIO */
359                 if (ret >= 0)
360                         ret = -EIO;
361                 host->chan_tx = NULL;
362                 host->dma_active = false;
363                 dma_release_channel(chan);
364                 /* Free the Rx channel too */
365                 chan = host->chan_rx;
366                 if (chan) {
367                         host->chan_rx = NULL;
368                         dma_release_channel(chan);
369                 }
370                 dev_warn(&host->pd->dev,
371                          "DMA failed: %d, falling back to PIO\n", ret);
372                 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
373         }
374
375         dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
376                 desc, cookie);
377 }
378
379 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
380                                  struct sh_mmcif_plat_data *pdata)
381 {
382         struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
383         struct dma_slave_config cfg;
384         dma_cap_mask_t mask;
385         int ret;
386
387         host->dma_active = false;
388
389         if (!pdata)
390                 return;
391
392         if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
393                 return;
394
395         /* We can only use DMA for both Tx and Rx together, or not at all */
396         dma_cap_zero(mask);
397         dma_cap_set(DMA_SLAVE, mask);
398
399         host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
400                                             (void *)pdata->slave_id_tx);
401         dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
402                 host->chan_tx);
403
404         if (!host->chan_tx)
405                 return;
406
407         cfg.slave_id = pdata->slave_id_tx;
408         cfg.direction = DMA_MEM_TO_DEV;
409         cfg.dst_addr = res->start + MMCIF_CE_DATA;
410         cfg.src_addr = 0;
411         ret = dmaengine_slave_config(host->chan_tx, &cfg);
412         if (ret < 0)
413                 goto ecfgtx;
414
415         host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
416                                             (void *)pdata->slave_id_rx);
417         dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
418                 host->chan_rx);
419
420         if (!host->chan_rx)
421                 goto erqrx;
422
423         cfg.slave_id = pdata->slave_id_rx;
424         cfg.direction = DMA_DEV_TO_MEM;
425         cfg.dst_addr = 0;
426         cfg.src_addr = res->start + MMCIF_CE_DATA;
427         ret = dmaengine_slave_config(host->chan_rx, &cfg);
428         if (ret < 0)
429                 goto ecfgrx;
430
431         return;
432
433 ecfgrx:
434         dma_release_channel(host->chan_rx);
435         host->chan_rx = NULL;
436 erqrx:
437 ecfgtx:
438         dma_release_channel(host->chan_tx);
439         host->chan_tx = NULL;
440 }
441
442 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
443 {
444         sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
445         /* Descriptors are freed automatically */
446         if (host->chan_tx) {
447                 struct dma_chan *chan = host->chan_tx;
448                 host->chan_tx = NULL;
449                 dma_release_channel(chan);
450         }
451         if (host->chan_rx) {
452                 struct dma_chan *chan = host->chan_rx;
453                 host->chan_rx = NULL;
454                 dma_release_channel(chan);
455         }
456
457         host->dma_active = false;
458 }
459
460 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
461 {
462         struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
463         bool sup_pclk = p ? p->sup_pclk : false;
464
465         sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
466         sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
467
468         if (!clk)
469                 return;
470         if (sup_pclk && clk == host->clk)
471                 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
472         else
473                 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
474                                 ((fls(DIV_ROUND_UP(host->clk,
475                                                    clk) - 1) - 1) << 16));
476
477         sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
478 }
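
/*
 * Worked example for the divider computation above (the 104 MHz parent rate
 * is an assumption for illustration): for the 400 kHz initialisation clock,
 * DIV_ROUND_UP(104000000, 400000) = 260 and fls(260 - 1) - 1 = 8, so 8 is
 * written into the CLK_CLEAR field. That selects a divide-by-2^(8+1) = 512,
 * the smallest power of two that brings the clock at or below the requested
 * rate, giving roughly 203 kHz.
 */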
479
480 static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
481 {
482         u32 tmp;
483
484         tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);
485
486         sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
487         sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
488         sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
489                 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
490         /* byte swap on */
491         sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
492 }
493
494 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
495 {
496         u32 state1, state2;
497         int ret, timeout;
498
499         host->sd_error = false;
500
501         state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
502         state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
503         dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
504         dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
505
506         if (state1 & STS1_CMDSEQ) {
507                 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
508                 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
509                 for (timeout = 10000000; timeout; timeout--) {
510                         if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
511                               & STS1_CMDSEQ))
512                                 break;
513                         mdelay(1);
514                 }
515                 if (!timeout) {
516                         dev_err(&host->pd->dev,
517                                 "Forced end of command sequence timeout err\n");
518                         return -EIO;
519                 }
520                 sh_mmcif_sync_reset(host);
521                 dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
522                 return -EIO;
523         }
524
525         if (state2 & STS2_CRC_ERR) {
526                 dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
527                         host->state, host->wait_for);
528                 ret = -EIO;
529         } else if (state2 & STS2_TIMEOUT_ERR) {
530                 dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
531                         host->state, host->wait_for);
532                 ret = -ETIMEDOUT;
533         } else {
534                 dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
535                         host->state, host->wait_for);
536                 ret = -EIO;
537         }
538         return ret;
539 }
540
541 static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
542 {
543         struct mmc_data *data = host->mrq->data;
544
545         host->sg_blkidx += host->blocksize;
546
547         /* data->sg->length must be a multiple of host->blocksize */
548         BUG_ON(host->sg_blkidx > data->sg->length);
549
550         if (host->sg_blkidx == data->sg->length) {
551                 host->sg_blkidx = 0;
552                 if (++host->sg_idx < data->sg_len)
553                         host->pio_ptr = sg_virt(++data->sg);
554         } else {
555                 host->pio_ptr = p;
556         }
557
558         return host->sg_idx != data->sg_len;
559 }
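
/*
 * Example of the bookkeeping above (sizes are illustrative): with 512-byte
 * blocks and a 4096-byte scatterlist entry, sg_blkidx advances by 512 per
 * call and wraps to the next sg entry after 8 blocks; the function returns
 * false once sg_idx reaches sg_len, i.e. when the whole transfer is done.
 */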
560
561 static void sh_mmcif_single_read(struct sh_mmcif_host *host,
562                                  struct mmc_request *mrq)
563 {
564         host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
565                            BLOCK_SIZE_MASK) + 3;
566
567         host->wait_for = MMCIF_WAIT_FOR_READ;
568
569         /* buffer read enable */
570         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
571 }
572
573 static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
574 {
575         struct mmc_data *data = host->mrq->data;
576         u32 *p = sg_virt(data->sg);
577         int i;
578
579         if (host->sd_error) {
580                 data->error = sh_mmcif_error_manage(host);
581                 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
582                 return false;
583         }
584
585         for (i = 0; i < host->blocksize / 4; i++)
586                 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
587
588         /* buffer read end */
589         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
590         host->wait_for = MMCIF_WAIT_FOR_READ_END;
591
592         return true;
593 }
594
595 static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
596                                 struct mmc_request *mrq)
597 {
598         struct mmc_data *data = mrq->data;
599
600         if (!data->sg_len || !data->sg->length)
601                 return;
602
603         host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
604                 BLOCK_SIZE_MASK;
605
606         host->wait_for = MMCIF_WAIT_FOR_MREAD;
607         host->sg_idx = 0;
608         host->sg_blkidx = 0;
609         host->pio_ptr = sg_virt(data->sg);
610
611         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
612 }
613
614 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
615 {
616         struct mmc_data *data = host->mrq->data;
617         u32 *p = host->pio_ptr;
618         int i;
619
620         if (host->sd_error) {
621                 data->error = sh_mmcif_error_manage(host);
622                 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
623                 return false;
624         }
625
626         BUG_ON(!data->sg->length);
627
628         for (i = 0; i < host->blocksize / 4; i++)
629                 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
630
631         if (!sh_mmcif_next_block(host, p))
632                 return false;
633
634         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
635
636         return true;
637 }
638
639 static void sh_mmcif_single_write(struct sh_mmcif_host *host,
640                                         struct mmc_request *mrq)
641 {
642         host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
643                            BLOCK_SIZE_MASK) + 3;
644
645         host->wait_for = MMCIF_WAIT_FOR_WRITE;
646
647         /* buffer write enable */
648         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
649 }
650
651 static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
652 {
653         struct mmc_data *data = host->mrq->data;
654         u32 *p = sg_virt(data->sg);
655         int i;
656
657         if (host->sd_error) {
658                 data->error = sh_mmcif_error_manage(host);
659                 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
660                 return false;
661         }
662
663         for (i = 0; i < host->blocksize / 4; i++)
664                 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
665
666         /* buffer write end */
667         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
668         host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
669
670         return true;
671 }
672
673 static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
674                                 struct mmc_request *mrq)
675 {
676         struct mmc_data *data = mrq->data;
677
678         if (!data->sg_len || !data->sg->length)
679                 return;
680
681         host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
682                 BLOCK_SIZE_MASK;
683
684         host->wait_for = MMCIF_WAIT_FOR_MWRITE;
685         host->sg_idx = 0;
686         host->sg_blkidx = 0;
687         host->pio_ptr = sg_virt(data->sg);
688
689         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
690 }
691
692 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
693 {
694         struct mmc_data *data = host->mrq->data;
695         u32 *p = host->pio_ptr;
696         int i;
697
698         if (host->sd_error) {
699                 data->error = sh_mmcif_error_manage(host);
700                 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
701                 return false;
702         }
703
704         BUG_ON(!data->sg->length);
705
706         for (i = 0; i < host->blocksize / 4; i++)
707                 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
708
709         if (!sh_mmcif_next_block(host, p))
710                 return false;
711
712         sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
713
714         return true;
715 }
716
717 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
718                                                 struct mmc_command *cmd)
719 {
720         if (cmd->flags & MMC_RSP_136) {
721                 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
722                 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
723                 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
724                 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
725         } else
726                 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
727 }
728
729 static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
730                                                 struct mmc_command *cmd)
731 {
732         cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
733 }
734
735 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
736                             struct mmc_request *mrq)
737 {
738         struct mmc_data *data = mrq->data;
739         struct mmc_command *cmd = mrq->cmd;
740         u32 opc = cmd->opcode;
741         u32 tmp = 0;
742
743         /* Response Type check */
744         switch (mmc_resp_type(cmd)) {
745         case MMC_RSP_NONE:
746                 tmp |= CMD_SET_RTYP_NO;
747                 break;
748         case MMC_RSP_R1:
749         case MMC_RSP_R1B:
750         case MMC_RSP_R3:
751                 tmp |= CMD_SET_RTYP_6B;
752                 break;
753         case MMC_RSP_R2:
754                 tmp |= CMD_SET_RTYP_17B;
755                 break;
756         default:
757                 dev_err(&host->pd->dev, "Unsupported response type.\n");
758                 break;
759         }
760         switch (opc) {
761         /* RBSY */
762         case MMC_SLEEP_AWAKE:
763         case MMC_SWITCH:
764         case MMC_STOP_TRANSMISSION:
765         case MMC_SET_WRITE_PROT:
766         case MMC_CLR_WRITE_PROT:
767         case MMC_ERASE:
768                 tmp |= CMD_SET_RBSY;
769                 break;
770         }
771         /* WDAT / DATW */
772         if (data) {
773                 tmp |= CMD_SET_WDAT;
774                 switch (host->bus_width) {
775                 case MMC_BUS_WIDTH_1:
776                         tmp |= CMD_SET_DATW_1;
777                         break;
778                 case MMC_BUS_WIDTH_4:
779                         tmp |= CMD_SET_DATW_4;
780                         break;
781                 case MMC_BUS_WIDTH_8:
782                         tmp |= CMD_SET_DATW_8;
783                         break;
784                 default:
785                         dev_err(&host->pd->dev, "Unsupported bus width.\n");
786                         break;
787                 }
788                 switch (host->timing) {
789                 case MMC_TIMING_UHS_DDR50:
790                         /*
791                          * MMC core will only set this timing if the host
792                          * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
793                          * implementations with this capability, e.g. sh73a0,
794                          * will have to set it in their platform data.
795                          */
796                         tmp |= CMD_SET_DARS;
797                         break;
798                 }
799         }
800         /* DWEN */
801         if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
802                 tmp |= CMD_SET_DWEN;
803         /* CMLTE/CMD12EN */
804         if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
805                 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
806                 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
807                                 data->blocks << 16);
808         }
809         /* RIDXC[1:0] check bits */
810         if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
811             opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
812                 tmp |= CMD_SET_RIDXC_BITS;
813         /* RCRC7C[1:0] check bits */
814         if (opc == MMC_SEND_OP_COND)
815                 tmp |= CMD_SET_CRC7C_BITS;
816         /* RCRC7C[1:0] internal CRC7 */
817         if (opc == MMC_ALL_SEND_CID ||
818                 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
819                 tmp |= CMD_SET_CRC7C_INTERNAL;
820
821         return (opc << 24) | tmp;
822 }
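
/*
 * Example of a resulting CE_CMD_SET word (editorial illustration): CMD25
 * (MMC_WRITE_MULTIPLE_BLOCK, R1 response) on a 4-bit bus comes out as
 *
 *	(25 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DWEN |
 *	CMD_SET_CMLTE | CMD_SET_CMD12EN | CMD_SET_DATW_4
 */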
823
824 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
825                                struct mmc_request *mrq, u32 opc)
826 {
827         switch (opc) {
828         case MMC_READ_MULTIPLE_BLOCK:
829                 sh_mmcif_multi_read(host, mrq);
830                 return 0;
831         case MMC_WRITE_MULTIPLE_BLOCK:
832                 sh_mmcif_multi_write(host, mrq);
833                 return 0;
834         case MMC_WRITE_BLOCK:
835                 sh_mmcif_single_write(host, mrq);
836                 return 0;
837         case MMC_READ_SINGLE_BLOCK:
838         case MMC_SEND_EXT_CSD:
839                 sh_mmcif_single_read(host, mrq);
840                 return 0;
841         default:
842                 dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
843                 return -EINVAL;
844         }
845 }
846
847 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
848                                struct mmc_request *mrq)
849 {
850         struct mmc_command *cmd = mrq->cmd;
851         u32 opc = cmd->opcode;
852         u32 mask;
853
854         switch (opc) {
855         /* response busy check */
856         case MMC_SLEEP_AWAKE:
857         case MMC_SWITCH:
858         case MMC_STOP_TRANSMISSION:
859         case MMC_SET_WRITE_PROT:
860         case MMC_CLR_WRITE_PROT:
861         case MMC_ERASE:
862                 mask = MASK_START_CMD | MASK_MRBSYE;
863                 break;
864         default:
865                 mask = MASK_START_CMD | MASK_MCRSPE;
866                 break;
867         }
868
869         if (mrq->data) {
870                 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
871                 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
872                                 mrq->data->blksz);
873         }
874         opc = sh_mmcif_set_cmd(host, mrq);
875
876         sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
877         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
878         /* set arg */
879         sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
880         /* set cmd */
881         sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
882
883         host->wait_for = MMCIF_WAIT_FOR_CMD;
884         schedule_delayed_work(&host->timeout_work, host->timeout);
885 }
886
887 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
888                               struct mmc_request *mrq)
889 {
890         switch (mrq->cmd->opcode) {
891         case MMC_READ_MULTIPLE_BLOCK:
892                 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
893                 break;
894         case MMC_WRITE_MULTIPLE_BLOCK:
895                 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
896                 break;
897         default:
898                 dev_err(&host->pd->dev, "unsupported stop cmd\n");
899                 mrq->stop->error = sh_mmcif_error_manage(host);
900                 return;
901         }
902
903         host->wait_for = MMCIF_WAIT_FOR_STOP;
904 }
905
906 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
907 {
908         struct sh_mmcif_host *host = mmc_priv(mmc);
909         unsigned long flags;
910
911         spin_lock_irqsave(&host->lock, flags);
912         if (host->state != STATE_IDLE) {
913                 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
914                 spin_unlock_irqrestore(&host->lock, flags);
915                 mrq->cmd->error = -EAGAIN;
916                 mmc_request_done(mmc, mrq);
917                 return;
918         }
919
920         host->state = STATE_REQUEST;
921         spin_unlock_irqrestore(&host->lock, flags);
922
923         switch (mrq->cmd->opcode) {
924         /* MMCIF does not support SD/SDIO commands */
925         case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
926         case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
927                 if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
928                         break;
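                /* fall through - a BCR-type CMD5/CMD8 is the SD/SDIO variant */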
929         case MMC_APP_CMD:
930         case SD_IO_RW_DIRECT:
931                 host->state = STATE_IDLE;
932                 mrq->cmd->error = -ETIMEDOUT;
933                 mmc_request_done(mmc, mrq);
934                 return;
935         default:
936                 break;
937         }
938
939         host->mrq = mrq;
940
941         sh_mmcif_start_cmd(host, mrq);
942 }
943
944 static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
945 {
946         int ret = clk_enable(host->hclk);
947
948         if (!ret) {
949                 host->clk = clk_get_rate(host->hclk);
950                 host->mmc->f_max = host->clk / 2;
951                 host->mmc->f_min = host->clk / 512;
952         }
953
954         return ret;
955 }
956
957 static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
958 {
959         struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
960         struct mmc_host *mmc = host->mmc;
961
962         if (pd && pd->set_pwr)
963                 pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
964         if (!IS_ERR(mmc->supply.vmmc))
965                 /* Errors ignored... */
966                 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
967                                       ios->power_mode ? ios->vdd : 0);
968 }
969
970 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
971 {
972         struct sh_mmcif_host *host = mmc_priv(mmc);
973         unsigned long flags;
974
975         spin_lock_irqsave(&host->lock, flags);
976         if (host->state != STATE_IDLE) {
977                 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
978                 spin_unlock_irqrestore(&host->lock, flags);
979                 return;
980         }
981
982         host->state = STATE_IOS;
983         spin_unlock_irqrestore(&host->lock, flags);
984
985         if (ios->power_mode == MMC_POWER_UP) {
986                 if (!host->card_present) {
987                         /* See if we also get DMA */
988                         sh_mmcif_request_dma(host, host->pd->dev.platform_data);
989                         host->card_present = true;
990                 }
991                 sh_mmcif_set_power(host, ios);
992         } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
993                 /* clock stop */
994                 sh_mmcif_clock_control(host, 0);
995                 if (ios->power_mode == MMC_POWER_OFF) {
996                         if (host->card_present) {
997                                 sh_mmcif_release_dma(host);
998                                 host->card_present = false;
999                         }
1000                 }
1001                 if (host->power) {
1002                         pm_runtime_put_sync(&host->pd->dev);
1003                         clk_disable(host->hclk);
1004                         host->power = false;
1005                         if (ios->power_mode == MMC_POWER_OFF)
1006                                 sh_mmcif_set_power(host, ios);
1007                 }
1008                 host->state = STATE_IDLE;
1009                 return;
1010         }
1011
1012         if (ios->clock) {
1013                 if (!host->power) {
1014                         sh_mmcif_clk_update(host);
1015                         pm_runtime_get_sync(&host->pd->dev);
1016                         host->power = true;
1017                         sh_mmcif_sync_reset(host);
1018                 }
1019                 sh_mmcif_clock_control(host, ios->clock);
1020         }
1021
1022         host->timing = ios->timing;
1023         host->bus_width = ios->bus_width;
1024         host->state = STATE_IDLE;
1025 }
1026
1027 static int sh_mmcif_get_cd(struct mmc_host *mmc)
1028 {
1029         struct sh_mmcif_host *host = mmc_priv(mmc);
1030         struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
1031         int ret = mmc_gpio_get_cd(mmc);
1032
1033         if (ret >= 0)
1034                 return ret;
1035
1036         if (!p || !p->get_cd)
1037                 return -ENOSYS;
1038         else
1039                 return p->get_cd(host->pd);
1040 }
1041
1042 static struct mmc_host_ops sh_mmcif_ops = {
1043         .request        = sh_mmcif_request,
1044         .set_ios        = sh_mmcif_set_ios,
1045         .get_cd         = sh_mmcif_get_cd,
1046 };
1047
1048 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1049 {
1050         struct mmc_command *cmd = host->mrq->cmd;
1051         struct mmc_data *data = host->mrq->data;
1052         long time;
1053
1054         if (host->sd_error) {
1055                 switch (cmd->opcode) {
1056                 case MMC_ALL_SEND_CID:
1057                 case MMC_SELECT_CARD:
1058                 case MMC_APP_CMD:
1059                         cmd->error = -ETIMEDOUT;
1060                         break;
1061                 default:
1062                         cmd->error = sh_mmcif_error_manage(host);
1063                         break;
1064                 }
1065                 dev_dbg(&host->pd->dev, "CMD%d error %d\n",
1066                         cmd->opcode, cmd->error);
1067                 host->sd_error = false;
1068                 return false;
1069         }
1070         if (!(cmd->flags & MMC_RSP_PRESENT)) {
1071                 cmd->error = 0;
1072                 return false;
1073         }
1074
1075         sh_mmcif_get_response(host, cmd);
1076
1077         if (!data)
1078                 return false;
1079
1080         /*
1081          * The completion can be signalled both from the DMA callback and on
1082          * error, so it has to be reset here, before setting .dma_active
1083          */
1084         init_completion(&host->dma_complete);
1085
1086         if (data->flags & MMC_DATA_READ) {
1087                 if (host->chan_rx)
1088                         sh_mmcif_start_dma_rx(host);
1089         } else {
1090                 if (host->chan_tx)
1091                         sh_mmcif_start_dma_tx(host);
1092         }
1093
1094         if (!host->dma_active) {
1095                 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
1096                 return !data->error;
1097         }
1098
1099         /* Running in the IRQ thread, can sleep */
1100         time = wait_for_completion_interruptible_timeout(&host->dma_complete,
1101                                                          host->timeout);
1102
1103         if (data->flags & MMC_DATA_READ)
1104                 dma_unmap_sg(host->chan_rx->device->dev,
1105                              data->sg, data->sg_len,
1106                              DMA_FROM_DEVICE);
1107         else
1108                 dma_unmap_sg(host->chan_tx->device->dev,
1109                              data->sg, data->sg_len,
1110                              DMA_TO_DEVICE);
1111
1112         if (host->sd_error) {
1113                 dev_err(host->mmc->parent,
1114                         "Error IRQ while waiting for DMA completion!\n");
1115                 /* Woken up by an error IRQ: abort DMA */
1116                 data->error = sh_mmcif_error_manage(host);
1117         } else if (!time) {
1118                 dev_err(host->mmc->parent, "DMA timeout!\n");
1119                 data->error = -ETIMEDOUT;
1120         } else if (time < 0) {
1121                 dev_err(host->mmc->parent,
1122                         "wait_for_completion_...() error %ld!\n", time);
1123                 data->error = time;
1124         }
1125         sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
1126                         BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
1127         host->dma_active = false;
1128
1129         if (data->error) {
1130                 data->bytes_xfered = 0;
1131                 /* Abort DMA */
1132                 if (data->flags & MMC_DATA_READ)
1133                         dmaengine_terminate_all(host->chan_rx);
1134                 else
1135                         dmaengine_terminate_all(host->chan_tx);
1136         }
1137
1138         return false;
1139 }
1140
1141 static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1142 {
1143         struct sh_mmcif_host *host = dev_id;
1144         struct mmc_request *mrq;
1145         bool wait = false;
1146
1147         cancel_delayed_work_sync(&host->timeout_work);
1148
1149         mutex_lock(&host->thread_lock);
1150
1151         mrq = host->mrq;
1152         if (!mrq) {
1153                 dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
1154                         host->state, host->wait_for);
1155                 mutex_unlock(&host->thread_lock);
1156                 return IRQ_HANDLED;
1157         }
1158
1159         /*
1160          * All handlers return true if processing continues, and false if the
1161          * request has to be completed - successfully or not
1162          */
1163         switch (host->wait_for) {
1164         case MMCIF_WAIT_FOR_REQUEST:
1165                 /* We're too late, the timeout has already kicked in */
1166                 mutex_unlock(&host->thread_lock);
1167                 return IRQ_HANDLED;
1168         case MMCIF_WAIT_FOR_CMD:
1169                 /* Wait for data? */
1170                 wait = sh_mmcif_end_cmd(host);
1171                 break;
1172         case MMCIF_WAIT_FOR_MREAD:
1173                 /* Wait for more data? */
1174                 wait = sh_mmcif_mread_block(host);
1175                 break;
1176         case MMCIF_WAIT_FOR_READ:
1177                 /* Wait for data end? */
1178                 wait = sh_mmcif_read_block(host);
1179                 break;
1180         case MMCIF_WAIT_FOR_MWRITE:
1181                 /* Wait for more data to write? */
1182                 wait = sh_mmcif_mwrite_block(host);
1183                 break;
1184         case MMCIF_WAIT_FOR_WRITE:
1185                 /* Wait for data end? */
1186                 wait = sh_mmcif_write_block(host);
1187                 break;
1188         case MMCIF_WAIT_FOR_STOP:
1189                 if (host->sd_error) {
1190                         mrq->stop->error = sh_mmcif_error_manage(host);
1191                         dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
1192                         break;
1193                 }
1194                 sh_mmcif_get_cmd12response(host, mrq->stop);
1195                 mrq->stop->error = 0;
1196                 break;
1197         case MMCIF_WAIT_FOR_READ_END:
1198         case MMCIF_WAIT_FOR_WRITE_END:
1199                 if (host->sd_error) {
1200                         mrq->data->error = sh_mmcif_error_manage(host);
1201                         dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
1202                 }
1203                 break;
1204         default:
1205                 BUG();
1206         }
1207
1208         if (wait) {
1209                 schedule_delayed_work(&host->timeout_work, host->timeout);
1210                 /* Wait for more data */
1211                 mutex_unlock(&host->thread_lock);
1212                 return IRQ_HANDLED;
1213         }
1214
1215         if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
1216                 struct mmc_data *data = mrq->data;
1217                 if (!mrq->cmd->error && data && !data->error)
1218                         data->bytes_xfered =
1219                                 data->blocks * data->blksz;
1220
1221                 if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
1222                         sh_mmcif_stop_cmd(host, mrq);
1223                         if (!mrq->stop->error) {
1224                                 schedule_delayed_work(&host->timeout_work, host->timeout);
1225                                 mutex_unlock(&host->thread_lock);
1226                                 return IRQ_HANDLED;
1227                         }
1228                 }
1229         }
1230
1231         host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1232         host->state = STATE_IDLE;
1233         host->mrq = NULL;
1234         mmc_request_done(host->mmc, mrq);
1235
1236         mutex_unlock(&host->thread_lock);
1237
1238         return IRQ_HANDLED;
1239 }
1240
1241 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
1242 {
1243         struct sh_mmcif_host *host = dev_id;
1244         u32 state;
1245
1246         state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
1247         sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
1248         sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
1249
1250         if (state & ~MASK_CLEAN)
1251                 dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
1252                         state);
1253
1254         if (state & INT_ERR_STS || state & ~INT_ALL) {
1255                 host->sd_error = true;
1256                 dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
1257         }
1258         if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
1259                 if (!host->mrq)
1260                         dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
1261                 if (!host->dma_active)
1262                         return IRQ_WAKE_THREAD;
1263                 else if (host->sd_error)
1264                         mmcif_dma_complete(host);
1265         } else {
1266                 dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
1267         }
1268
1269         return IRQ_HANDLED;
1270 }
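
/*
 * Note on the hard/threaded IRQ split above: sh_mmcif_intr() only
 * acknowledges and classifies CE_INT bits in hard IRQ context; everything
 * that may sleep (PIO loops, waiting for DMA completion, mmc_request_done())
 * runs in the threaded handler sh_mmcif_irqt(), hence the
 * request_threaded_irq() calls in sh_mmcif_probe() below.
 */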
1271
1272 static void mmcif_timeout_work(struct work_struct *work)
1273 {
1274         struct delayed_work *d = container_of(work, struct delayed_work, work);
1275         struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
1276         struct mmc_request *mrq = host->mrq;
1277         unsigned long flags;
1278
1279         if (host->dying)
1280                 /* Don't run after mmc_remove_host() */
1281                 return;
1282
1283         dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
1284                 host->wait_for, mrq->cmd->opcode);
1285
1286         spin_lock_irqsave(&host->lock, flags);
1287         if (host->state == STATE_IDLE) {
1288                 spin_unlock_irqrestore(&host->lock, flags);
1289                 return;
1290         }
1291
1292         host->state = STATE_TIMEOUT;
1293         spin_unlock_irqrestore(&host->lock, flags);
1294
1295         /*
1296          * Handle races with cancel_delayed_work(), unless
1297          * cancel_delayed_work_sync() is used
1298          */
1299         switch (host->wait_for) {
1300         case MMCIF_WAIT_FOR_CMD:
1301                 mrq->cmd->error = sh_mmcif_error_manage(host);
1302                 break;
1303         case MMCIF_WAIT_FOR_STOP:
1304                 mrq->stop->error = sh_mmcif_error_manage(host);
1305                 break;
1306         case MMCIF_WAIT_FOR_MREAD:
1307         case MMCIF_WAIT_FOR_MWRITE:
1308         case MMCIF_WAIT_FOR_READ:
1309         case MMCIF_WAIT_FOR_WRITE:
1310         case MMCIF_WAIT_FOR_READ_END:
1311         case MMCIF_WAIT_FOR_WRITE_END:
1312                 mrq->data->error = sh_mmcif_error_manage(host);
1313                 break;
1314         default:
1315                 BUG();
1316         }
1317
1318         host->state = STATE_IDLE;
1319         host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1320         host->mrq = NULL;
1321         mmc_request_done(host->mmc, mrq);
1322 }
1323
1324 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
1325 {
1326         struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
1327         struct mmc_host *mmc = host->mmc;
1328
1329         mmc_regulator_get_supply(mmc);
1330
1331         if (!pd)
1332                 return;
1333
1334         if (!mmc->ocr_avail)
1335                 mmc->ocr_avail = pd->ocr;
1336         else if (pd->ocr)
1337                 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1338 }
1339
1340 static int sh_mmcif_probe(struct platform_device *pdev)
1341 {
1342         int ret = 0, irq[2];
1343         struct mmc_host *mmc;
1344         struct sh_mmcif_host *host;
1345         struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
1346         struct resource *res;
1347         void __iomem *reg;
1348         const char *name;
1349
1350         irq[0] = platform_get_irq(pdev, 0);
1351         irq[1] = platform_get_irq(pdev, 1);
1352         if (irq[0] < 0) {
1353                 dev_err(&pdev->dev, "failed to get IRQ\n");
1354                 return -ENXIO;
1355         }
1356         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1357         if (!res) {
1358                 dev_err(&pdev->dev, "platform_get_resource error.\n");
1359                 return -ENXIO;
1360         }
1361         reg = ioremap(res->start, resource_size(res));
1362         if (!reg) {
1363                 dev_err(&pdev->dev, "ioremap error.\n");
1364                 return -ENOMEM;
1365         }
1366
1367         mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
1368         if (!mmc) {
1369                 ret = -ENOMEM;
1370                 goto ealloch;
1371         }
1372         mmc_of_parse(mmc);
1373         host            = mmc_priv(mmc);
1374         host->mmc       = mmc;
1375         host->addr      = reg;
1376         host->timeout   = msecs_to_jiffies(1000);
1377
1378         host->pd = pdev;
1379
1380         spin_lock_init(&host->lock);
1381
1382         mmc->ops = &sh_mmcif_ops;
1383         sh_mmcif_init_ocr(host);
1384
1385         mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
1386         if (pd && pd->caps)
1387                 mmc->caps |= pd->caps;
1388         mmc->max_segs = 32;
1389         mmc->max_blk_size = 512;
1390         mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
1391         mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1392         mmc->max_seg_size = mmc->max_req_size;
1393
1394         platform_set_drvdata(pdev, host);
1395
1396         pm_runtime_enable(&pdev->dev);
1397         host->power = false;
1398
1399         host->hclk = clk_get(&pdev->dev, NULL);
1400         if (IS_ERR(host->hclk)) {
1401                 ret = PTR_ERR(host->hclk);
1402                 dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
1403                 goto eclkget;
1404         }
1405         ret = sh_mmcif_clk_update(host);
1406         if (ret < 0)
1407                 goto eclkupdate;
1408
1409         ret = pm_runtime_resume(&pdev->dev);
1410         if (ret < 0)
1411                 goto eresume;
1412
1413         INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
1414
1415         sh_mmcif_sync_reset(host);
1416         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1417
1418         name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
1419         ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
1420         if (ret) {
1421                 dev_err(&pdev->dev, "request_irq error (%s)\n", name);
1422                 goto ereqirq0;
1423         }
1424         if (irq[1] >= 0) {
1425                 ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
1426                                            0, "sh_mmc:int", host);
1427                 if (ret) {
1428                         dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1429                         goto ereqirq1;
1430                 }
1431         }
1432
1433         if (pd && pd->use_cd_gpio) {
1434                 ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
1435                 if (ret < 0)
1436                         goto erqcd;
1437         }
1438
1439         mutex_init(&host->thread_lock);
1440
1441         clk_disable(host->hclk);
1442         ret = mmc_add_host(mmc);
1443         if (ret < 0)
1444                 goto emmcaddh;
1445
1446         dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1447
1448         dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
1449         dev_dbg(&pdev->dev, "chip ver H'%04x\n",
1450                 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
1451         return ret;
1452
1453 emmcaddh:
1454 erqcd:
1455         if (irq[1] >= 0)
1456                 free_irq(irq[1], host);
1457 ereqirq1:
1458         free_irq(irq[0], host);
1459 ereqirq0:
1460         pm_runtime_suspend(&pdev->dev);
1461 eresume:
1462         clk_disable(host->hclk);
1463 eclkupdate:
1464         clk_put(host->hclk);
1465 eclkget:
1466         pm_runtime_disable(&pdev->dev);
1467         mmc_free_host(mmc);
1468 ealloch:
1469         iounmap(reg);
1470         return ret;
1471 }
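
/*
 * Minimal board-side usage sketch (illustrative only; the DMA slave IDs are
 * platform specific and assumed here):
 *
 *	static struct sh_mmcif_plat_data mmcif_pdata = {
 *		.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
 *		.caps		= MMC_CAP_8_BIT_DATA,
 *		.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,
 *		.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
 *	};
 *
 * registered as platform data of a "sh_mmcif" platform device together with
 * the MMIO and IRQ resources, so that sh_mmcif_probe() above picks it up.
 */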
1472
1473 static int sh_mmcif_remove(struct platform_device *pdev)
1474 {
1475         struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1476         int irq[2];
1477
1478         host->dying = true;
1479         clk_enable(host->hclk);
1480         pm_runtime_get_sync(&pdev->dev);
1481
1482         dev_pm_qos_hide_latency_limit(&pdev->dev);
1483
1484         mmc_remove_host(host->mmc);
1485         sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1486
1487         /*
1488          * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
1489          * mmc_remove_host() call above. But swapping order doesn't help either
1490          * (a query on the linux-mmc mailing list didn't bring any replies).
1491          */
1492         cancel_delayed_work_sync(&host->timeout_work);
1493
1494         if (host->addr)
1495                 iounmap(host->addr);
1496
1497         irq[0] = platform_get_irq(pdev, 0);
1498         irq[1] = platform_get_irq(pdev, 1);
1499
1500         free_irq(irq[0], host);
1501         if (irq[1] >= 0)
1502                 free_irq(irq[1], host);
1503
1504         platform_set_drvdata(pdev, NULL);
1505
1506         clk_disable(host->hclk);
1507         mmc_free_host(host->mmc);
1508         pm_runtime_put_sync(&pdev->dev);
1509         pm_runtime_disable(&pdev->dev);
1510
1511         return 0;
1512 }
1513
1514 #ifdef CONFIG_PM
1515 static int sh_mmcif_suspend(struct device *dev)
1516 {
1517         struct sh_mmcif_host *host = dev_get_drvdata(dev);
1518         int ret = mmc_suspend_host(host->mmc);
1519
1520         if (!ret)
1521                 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1522
1523         return ret;
1524 }
1525
1526 static int sh_mmcif_resume(struct device *dev)
1527 {
1528         struct sh_mmcif_host *host = dev_get_drvdata(dev);
1529
1530         return mmc_resume_host(host->mmc);
1531 }
1532 #else
1533 #define sh_mmcif_suspend        NULL
1534 #define sh_mmcif_resume         NULL
1535 #endif  /* CONFIG_PM */
1536
1537 static const struct of_device_id mmcif_of_match[] = {
1538         { .compatible = "renesas,sh-mmcif" },
1539         { }
1540 };
1541 MODULE_DEVICE_TABLE(of, mmcif_of_match);
1542
1543 static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
1544         .suspend = sh_mmcif_suspend,
1545         .resume = sh_mmcif_resume,
1546 };
1547
1548 static struct platform_driver sh_mmcif_driver = {
1549         .probe          = sh_mmcif_probe,
1550         .remove         = sh_mmcif_remove,
1551         .driver         = {
1552                 .name   = DRIVER_NAME,
1553                 .pm     = &sh_mmcif_dev_pm_ops,
1554                 .owner  = THIS_MODULE,
1555                 .of_match_table = mmcif_of_match,
1556         },
1557 };
1558
1559 module_platform_driver(sh_mmcif_driver);
1560
1561 MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
1562 MODULE_LICENSE("GPL");
1563 MODULE_ALIAS("platform:" DRIVER_NAME);
1564 MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");