mmc: dw_mmc: Support voltage changes
[cascardo/linux.git] / drivers / mmc / host / dw_mmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sd.h>
33 #include <linux/mmc/sdio.h>
34 #include <linux/mmc/dw_mmc.h>
35 #include <linux/bitops.h>
36 #include <linux/regulator/consumer.h>
37 #include <linux/workqueue.h>
38 #include <linux/of.h>
39 #include <linux/of_gpio.h>
40 #include <linux/mmc/slot-gpio.h>
41
42 #include "dw_mmc.h"
43
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
/* Values stored in host->dir_status to track the active transfer direction */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers smaller than this many bytes skip DMA and use PIO instead */
#define DW_MCI_DMA_THRESHOLD	16

/* Card clock limits advertised to the MMC core */
#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
58
#ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, for clearing IDSTS in one write */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * Hardware descriptor layout for the DesignWare internal DMA controller
 * (dual-buffer / chained mode).  The layout and bit positions are fixed
 * by the controller; do not reorder fields.
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)		/* disable completion interrupt */
#define IDMAC_DES0_LD	BIT(2)		/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)		/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)		/* second address is next-desc pointer */
#define IDMAC_DES0_ER	BIT(5)		/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)		/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)		/* descriptor owned by the DMAC */

	u32		des1;	/* Buffer sizes */
/* Buffer 1 size lives in bits [12:0]; bits [25:13] (buffer 2) are kept */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
84
/*
 * Tuning block patterns sent by the card during CMD19/CMD21 tuning.
 * These are the fixed reference patterns for 4-bit and 8-bit bus widths
 * (presumably per the SD/eMMC specifications — the host compares received
 * data against them to select a sample phase).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
114
115 static bool dw_mci_reset(struct dw_mci *host);
116
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
119 {
120         struct dw_mci_slot *slot = s->private;
121         struct mmc_request *mrq;
122         struct mmc_command *cmd;
123         struct mmc_command *stop;
124         struct mmc_data *data;
125
126         /* Make sure we get a consistent snapshot */
127         spin_lock_bh(&slot->host->lock);
128         mrq = slot->mrq;
129
130         if (mrq) {
131                 cmd = mrq->cmd;
132                 data = mrq->data;
133                 stop = mrq->stop;
134
135                 if (cmd)
136                         seq_printf(s,
137                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138                                    cmd->opcode, cmd->arg, cmd->flags,
139                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
140                                    cmd->resp[2], cmd->error);
141                 if (data)
142                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143                                    data->bytes_xfered, data->blocks,
144                                    data->blksz, data->flags, data->error);
145                 if (stop)
146                         seq_printf(s,
147                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148                                    stop->opcode, stop->arg, stop->flags,
149                                    stop->resp[0], stop->resp[1], stop->resp[2],
150                                    stop->resp[2], stop->error);
151         }
152
153         spin_unlock_bh(&slot->host->lock);
154
155         return 0;
156 }
157
/* Bind the "req" debugfs file to dw_mci_req_show via seq_file single_open */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
162
/* File operations for the per-slot debugfs "req" file */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
170
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
172 {
173         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180         return 0;
181 }
182
/* Bind the "regs" debugfs file to dw_mci_regs_show via seq_file single_open */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
187
/* File operations for the debugfs "regs" file */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
195
/*
 * Create the per-slot debugfs entries under the MMC core's debugfs root:
 * register dump, current request dump, and raw views of the state machine
 * state and event bitmasks.  Failure is non-fatal: we just log an error
 * and carry on without debugfs support.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	/* Expose state-machine internals directly; read-only, root only */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
236 #endif /* defined(CONFIG_DEBUG_FS) */
237
238 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
239
/*
 * Translate an mmc_command into the CMD register value for this
 * controller.  Sets the stop/abort, wait-for-data, response and data
 * direction bits, and handles the special CMD11 (voltage switch) setup.
 * Returns the CMD register bits (without SDMMC_CMD_START).
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/* Abort-class commands must not wait for previous data to finish */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		/* Tell the CIU to latch the new CLKENA setting */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Give the platform glue a chance to tweak the command bits */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
312
313 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
314 {
315         struct mmc_command *stop;
316         u32 cmdr;
317
318         if (!cmd->data)
319                 return 0;
320
321         stop = &host->stop_abort;
322         cmdr = cmd->opcode;
323         memset(stop, 0, sizeof(struct mmc_command));
324
325         if (cmdr == MMC_READ_SINGLE_BLOCK ||
326             cmdr == MMC_READ_MULTIPLE_BLOCK ||
327             cmdr == MMC_WRITE_BLOCK ||
328             cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
329                 stop->opcode = MMC_STOP_TRANSMISSION;
330                 stop->arg = 0;
331                 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
332         } else if (cmdr == SD_IO_RW_EXTENDED) {
333                 stop->opcode = SD_IO_RW_DIRECT;
334                 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
335                              ((cmd->arg >> 28) & 0x7);
336                 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
337         } else {
338                 return 0;
339         }
340
341         cmdr = stop->opcode | SDMMC_CMD_STOP |
342                 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
343
344         return cmdr;
345 }
346
/*
 * Kick off a command: latch it as the current command, program CMDARG,
 * then write CMD with the START bit set.  The wmb() orders the argument
 * write before the doorbell write so the controller sees a consistent
 * pair.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();	/* ensure CMDARG hits the hardware before CMD */

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
360
/*
 * Send the stop command for a data transfer: the request's own stop if
 * it has one, otherwise the pre-built abort in host->stop_abort (see
 * dw_mci_prep_stop_abort), using the CMD bits cached in host->stop_cmdr.
 */
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}
366
367 /* DMA interface functions */
/* DMA interface functions */

/*
 * Abort any in-flight DMA and mark the transfer phase complete so the
 * state machine can move on (typically called from error paths).
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
378
379 static int dw_mci_get_dma_dir(struct mmc_data *data)
380 {
381         if (data->flags & MMC_DATA_WRITE)
382                 return DMA_TO_DEVICE;
383         else
384                 return DMA_FROM_DEVICE;
385 }
386
387 #ifdef CONFIG_MMC_DW_IDMAC
388 static void dw_mci_dma_cleanup(struct dw_mci *host)
389 {
390         struct mmc_data *data = host->data;
391
392         if (data)
393                 if (!data->host_cookie)
394                         dma_unmap_sg(host->dev,
395                                      data->sg,
396                                      data->sg_len,
397                                      dw_mci_get_dma_dir(data));
398 }
399
/* Soft-reset the internal DMA controller via the BMOD register. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
407
/*
 * Stop the internal DMA controller: detach it from the data path in
 * CTRL, then disable and soft-reset the IDMAC itself through BMOD.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
424
/*
 * IDMAC transfer-complete handler: unmap the buffers and, if the data
 * is still around, flag the transfer complete and poke the tasklet to
 * advance the request state machine.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
442
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one descriptor per segment, all owned by the DMAC with
 * per-descriptor interrupts suppressed, then mark the first/last
 * descriptors and flush the writes before the DMAC is started.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor (byte arithmetic: sg_cpu is a void pointer) */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();	/* descriptors must be visible before the DMAC is started */
}
474
/*
 * Program the descriptor ring for the current data transfer and start
 * the internal DMA controller, finishing with a poll-demand write that
 * makes it fetch the first descriptor.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();	/* route selection must land before the IDMAC is enabled */

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
496
/*
 * One-time IDMAC setup: chain the descriptor ring (one page's worth of
 * descriptors, last one wrapping back to the first with the end-of-ring
 * bit set), reset the DMAC, unmask only the interrupts we care about,
 * and point the hardware at the ring.  Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
524
/* DMA operations backed by the DesignWare internal DMA controller */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
532 #endif /* CONFIG_MMC_DW_IDMAC */
533
/*
 * Decide whether a data transfer is DMA-able and, if so, map its
 * scatterlist.  Rejects short transfers and any buffer that is not
 * word-aligned in offset and length.  When @next is true the mapping is
 * being done ahead of time from pre_req and the sg count is cached in
 * data->host_cookie for the actual submit to pick up.
 *
 * Returns the number of mapped segments, or -EINVAL if the transfer
 * must fall back to PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Already mapped by a previous pre_req call: reuse that mapping */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
572
573 static void dw_mci_pre_req(struct mmc_host *mmc,
574                            struct mmc_request *mrq,
575                            bool is_first_req)
576 {
577         struct dw_mci_slot *slot = mmc_priv(mmc);
578         struct mmc_data *data = mrq->data;
579
580         if (!slot->host->use_dma || !data)
581                 return;
582
583         if (data->host_cookie) {
584                 data->host_cookie = 0;
585                 return;
586         }
587
588         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
589                 data->host_cookie = 0;
590 }
591
592 static void dw_mci_post_req(struct mmc_host *mmc,
593                             struct mmc_request *mrq,
594                             int err)
595 {
596         struct dw_mci_slot *slot = mmc_priv(mmc);
597         struct mmc_data *data = mrq->data;
598
599         if (!slot->host->use_dma || !data)
600                 return;
601
602         if (data->host_cookie)
603                 dma_unmap_sg(slot->host->dev,
604                              data->sg,
605                              data->sg_len,
606                              dw_mci_get_dma_dir(data));
607         data->host_cookie = 0;
608 }
609
/*
 * Pick burst size (MSIZE) and RX/TX watermarks for the FIFOTH register
 * based on the block size of the upcoming DMA transfer.  We search for
 * the largest burst size that evenly divides both the block depth and
 * the TX watermark complement; if the block size is not a multiple of
 * the FIFO width, or no burst fits, we fall back to single transfers.
 *
 * Cleanups: the burst-size table is now static const (it was rebuilt on
 * the stack on every call), the element count uses ARRAY_SIZE, and the
 * redundant re-initialisation before the goto is gone.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1' (single transfers),
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
650
/*
 * Configure the card read threshold (CDTHRCTL) for a read transfer.
 * The threshold is only useful at high speeds (HS200 / SDR104) and only
 * when a whole block fits in the FIFO; otherwise it is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	/* Only meaningful for reads; writes never reach this path */
	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* A block bigger than the FIFO can't be thresholded */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
681
/*
 * Try to start the data transfer via DMA.  Returns 0 on success; a
 * negative error means the caller must fall back to PIO.  On success
 * the RX/TX FIFO interrupts are masked (the DMAC signals completion
 * instead) and FIFOTH is retuned when the block size changed.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
728
/*
 * Set up the data phase of a request.  Prefers DMA; if that is not
 * possible (no channel, unaligned buffers, short transfer) it falls
 * back to PIO: starts an sg_miter over the buffers, re-enables the
 * RX/TX FIFO interrupts, disables the DMA path and restores the
 * default FIFO thresholds.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO events, then unmask RX/TX interrupts */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
783
/*
 * Synchronously issue a "no data" controller command (e.g. clock update)
 * and busy-wait up to 500ms for the controller to clear the START bit,
 * which signals it has accepted the command.  Logs an error on timeout
 * but does not otherwise propagate the failure.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();	/* CMDARG must be visible before the command is started */
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
803
/*
 * Program the card clock and bus width for a slot.  The clock change
 * follows the controller's mandated sequence: disable the clock, inform
 * the CIU, write the divider, inform the CIU again, re-enable the clock
 * (with low-power gating unless SDIO interrupts are in use), and inform
 * the CIU one final time.  During a CMD11 voltage switch the VOLT_SWITCH
 * bit must accompany every clock-update command.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (full bus_hz) */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with reflecting clock divisor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
868
/*
 * Program the controller for the request queued on @slot and issue @cmd.
 * @cmd may be the request's main command or its CMD23 (mrq->sbc) precursor.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Start with a clean event/status slate for this request. */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Program timeout, total byte count and block size. */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* Make sure the data setup is visible before the command goes out. */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Precompute the stop/abort command used if the transfer errors out. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
913
914 static void dw_mci_start_request(struct dw_mci *host,
915                                  struct dw_mci_slot *slot)
916 {
917         struct mmc_request *mrq = slot->mrq;
918         struct mmc_command *cmd;
919
920         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
921         __dw_mci_start_request(host, slot, cmd);
922 }
923
924 /* must be called with host->lock held */
925 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
926                                  struct mmc_request *mrq)
927 {
928         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
929                  host->state);
930
931         slot->mrq = mrq;
932
933         if (host->state == STATE_WAITING_CMD11_DONE) {
934                 dev_warn(&slot->mmc->class_dev,
935                          "Voltage change didn't complete\n");
936                 /*
937                  * this case isn't expected to happen, so we can
938                  * either crash here or just try to continue on
939                  * in the closest possible state
940                  */
941                 host->state = STATE_IDLE;
942         }
943
944         if (host->state == STATE_IDLE) {
945                 host->state = STATE_SENDING_CMD;
946                 dw_mci_start_request(host, slot);
947         } else {
948                 list_add_tail(&slot->queue_node, &host->queue);
949         }
950 }
951
952 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
953 {
954         struct dw_mci_slot *slot = mmc_priv(mmc);
955         struct dw_mci *host = slot->host;
956
957         WARN_ON(slot->mrq);
958
959         /*
960          * The check for card presence and queueing of the request must be
961          * atomic, otherwise the card could be removed in between and the
962          * request wouldn't fail until another card was inserted.
963          */
964         spin_lock_bh(&host->lock);
965
966         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
967                 spin_unlock_bh(&host->lock);
968                 mrq->cmd->error = -ENOMEDIUM;
969                 mmc_request_done(mmc, mrq);
970                 return;
971         }
972
973         dw_mci_queue_request(host, slot, mrq);
974
975         spin_unlock_bh(&host->lock);
976 }
977
/*
 * mmc_host_ops .set_ios callback: apply bus width, timing, clock and
 * power-mode changes requested by the MMC core for this slot.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	/* Translate the core's bus width into the controller's CTYPE encoding. */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	/* UHS_REG bits [31:16] hold the per-slot DDR enable flags. */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the variant driver a chance to apply platform-specific settings. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	/* A non-zero clock means the CMD11 voltage-switch sequence is over. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* Bring up the card power (vmmc) first; bail out on failure. */
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* Then the I/O voltage (vqmmc); track state to avoid double-enable. */
		if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(slot->host->dev,
					"failed to enable vqmmc regulator\n");
			else
				slot->host->vqmmc_enabled = true;
		}
		/* Next command issued for this slot will carry SDMMC_CMD_INIT. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			slot->host->vqmmc_enabled = false;
		}

		/* Drop the per-slot power-enable bit last. */
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
1065
1066 static int dw_mci_card_busy(struct mmc_host *mmc)
1067 {
1068         struct dw_mci_slot *slot = mmc_priv(mmc);
1069         u32 status;
1070
1071         /*
1072          * Check the busy bit which is low when DAT[3:0]
1073          * (the data lines) are 0000
1074          */
1075         status = mci_readl(slot->host, STATUS);
1076
1077         return !!(status & SDMMC_STATUS_BUSY);
1078 }
1079
/*
 * mmc_host_ops .start_signal_voltage_switch callback.
 *
 * Returns 0 on success or a negative errno if the vqmmc regulator
 * could not be programmed to the requested voltage range.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;	/* per-slot 1.8V enable bit */
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		/* 3.3V signaling: accept a wide 2.7V-3.6V regulator window. */
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		/* 1.8V (or 1.2V request falls here too): 1.7V-1.95V window. */
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_err(&mmc->class_dev,
					 "Regulator set error %d: %d - %d\n",
					 ret, min_uv, max_uv);
			return ret;
		}
	}
	/* Update UHS_REG only after the regulator change succeeded (or was absent). */
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
1118
1119 static int dw_mci_get_ro(struct mmc_host *mmc)
1120 {
1121         int read_only;
1122         struct dw_mci_slot *slot = mmc_priv(mmc);
1123         int gpio_ro = mmc_gpio_get_ro(mmc);
1124
1125         /* Use platform get_ro function, else try on board write protect */
1126         if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1127                         (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1128                 read_only = 0;
1129         else if (!IS_ERR_VALUE(gpio_ro))
1130                 read_only = gpio_ro;
1131         else
1132                 read_only =
1133                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1134
1135         dev_dbg(&mmc->class_dev, "card is %s\n",
1136                 read_only ? "read-only" : "read-write");
1137
1138         return read_only;
1139 }
1140
/*
 * mmc_host_ops .get_cd callback: 1 if a card is present, else 0.
 * The result is also cached in the slot's DW_MMC_CARD_PRESENT flag,
 * updated under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		/* CDETECT bit is active low: 0 means a card is inserted. */
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
1170
1171 /*
1172  * Disable lower power mode.
1173  *
1174  * Low power mode will stop the card clock when idle.  According to the
1175  * description of the CLKENA register we should disable low power mode
1176  * for SDIO cards if we need SDIO interrupts to work.
1177  *
1178  * This function is fast if low power mode is already disabled.
1179  */
1180 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1181 {
1182         struct dw_mci *host = slot->host;
1183         u32 clk_en_a;
1184         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1185
1186         clk_en_a = mci_readl(host, CLKENA);
1187
1188         if (clk_en_a & clken_low_pwr) {
1189                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1190                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1191                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1192         }
1193 }
1194
1195 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1196 {
1197         struct dw_mci_slot *slot = mmc_priv(mmc);
1198         struct dw_mci *host = slot->host;
1199         u32 int_mask;
1200
1201         /* Enable/disable Slot Specific SDIO interrupt */
1202         int_mask = mci_readl(host, INTMASK);
1203         if (enb) {
1204                 /*
1205                  * Turn off low power mode if it was enabled.  This is a bit of
1206                  * a heavy operation and we disable / enable IRQs a lot, so
1207                  * we'll leave low power mode disabled and it will get
1208                  * re-enabled again in dw_mci_setup_bus().
1209                  */
1210                 dw_mci_disable_low_power(slot);
1211
1212                 mci_writel(host, INTMASK,
1213                            (int_mask | SDMMC_INT_SDIO(slot->id)));
1214         } else {
1215                 mci_writel(host, INTMASK,
1216                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1217         }
1218 }
1219
1220 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1221 {
1222         struct dw_mci_slot *slot = mmc_priv(mmc);
1223         struct dw_mci *host = slot->host;
1224         const struct dw_mci_drv_data *drv_data = host->drv_data;
1225         struct dw_mci_tuning_data tuning_data;
1226         int err = -ENOSYS;
1227
1228         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1229                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1230                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1231                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1232                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1233                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1234                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1235                 } else {
1236                         return -EINVAL;
1237                 }
1238         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1239                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1240                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1241         } else {
1242                 dev_err(host->dev,
1243                         "Undefined command(%d) for tuning\n", opcode);
1244                 return -EINVAL;
1245         }
1246
1247         if (drv_data && drv_data->execute_tuning)
1248                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1249         return err;
1250 }
1251
/* Host operations exported to the MMC core for each dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,

};
1265
/*
 * Finish @mrq, start the next queued slot's request if any, and notify
 * the MMC core.  host->lock is dropped around mmc_request_done() and
 * re-acquired afterwards (hence the sparse annotations).
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: start its request immediately. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		/* A finished CMD11 leaves us waiting for the voltage switch. */
		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	/* Drop the lock while calling back into the MMC core. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1298
/*
 * Read back the card's response for @cmd and translate the latched
 * command interrupt status into cmd->error.  Returns cmd->error
 * (0 on success, negative errno otherwise).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/*
			 * 136-bit responses: RESP0 holds the least
			 * significant word, so fill resp[] in reverse.
			 */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map interrupt status bits onto errno values, most specific first. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1337
/*
 * Translate the latched data-phase interrupt status into data->error and
 * update data->bytes_xfered.  Resets the host on any data error to flush
 * lingering FIFO contents.  Returns data->error.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			/* Data read timeout. */
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			/* Data CRC error. */
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on transfer direction. */
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		/* Clean transfer: report the full programmed byte count. */
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1380
/*
 * Request state machine, run as a tasklet.  Consumes events latched by the
 * interrupt handler in host->pending_events and walks host->state through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP as appropriate,
 * looping until the state stops changing.  Runs under host->lock.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			/* Nothing in flight: nothing to do. */
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the real command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Command failed with data attached: abort the transfer. */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* No stop needed (or CMD23 handled it): finish up. */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to drain before moving on. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1565
1566 /* push final bytes to part_buf, only use during push */
1567 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1568 {
1569         memcpy((void *)&host->part_buf, buf, cnt);
1570         host->part_buf_count = cnt;
1571 }
1572
1573 /* append bytes to part_buf, only use during push */
1574 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1575 {
1576         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1577         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1578         host->part_buf_count += cnt;
1579         return cnt;
1580 }
1581
1582 /* pull first bytes from part_buf, only use during pull */
1583 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1584 {
1585         cnt = min(cnt, (int)host->part_buf_count);
1586         if (cnt) {
1587                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1588                        cnt);
1589                 host->part_buf_count -= cnt;
1590                 host->part_buf_start += cnt;
1591         }
1592         return cnt;
1593 }
1594
1595 /* pull final bytes from the part_buf, assuming it's just been filled */
1596 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1597 {
1598         memcpy(buf, &host->part_buf, cnt);
1599         host->part_buf_start = cnt;
1600         host->part_buf_count = (1 << host->data_shift) - cnt;
1601 }
1602
/*
 * PIO push path for a 16-bit-wide data FIFO: write @cnt bytes from @buf
 * into the FIFO two bytes at a time, staging any odd leftover byte in
 * host->part_buf until the next call (or flushing it at end of transfer).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		/* Flush the staging word once it holds a full 16 bits. */
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer to avoid faults. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		/* Aligned (or unaligned-tolerant) fast path: write straight from buf. */
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1653
/*
 * Pull cnt bytes from the data FIFO into buf using 16-bit reads.
 * A trailing partial word is read out anyway and parked via
 * dw_mci_pull_final_bytes() for the next caller to consume.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf may be misaligned for u16 access - bounce via aligned_buf */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* read one more FIFO word; leftover bytes go to part_buf */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1685
/*
 * Push cnt bytes from buf into the data FIFO using 32-bit writes.
 * Bytes that do not fill a whole 32-bit FIFO word are staged in
 * host->part_buf32 for the next call, except at the very end of the
 * transfer where the partial word is flushed to the FIFO.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* staged word is now complete - flush it */
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf may be misaligned for u32 access - bounce via aligned_buf */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1736
/*
 * Pull cnt bytes from the data FIFO into buf using 32-bit reads.
 * A trailing partial word is read out anyway and parked via
 * dw_mci_pull_final_bytes() for the next caller to consume.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf may be misaligned for u32 access - bounce via aligned_buf */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* read one more FIFO word; leftover bytes go to part_buf */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1768
/*
 * Push cnt bytes from buf into the data FIFO using 64-bit writes.
 * Bytes that do not fill a whole 64-bit FIFO word are staged in
 * host->part_buf for the next call, except at the very end of the
 * transfer where the partial word is flushed to the FIFO.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* staged word is now complete - flush it */
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf may be misaligned for u64 access - bounce via aligned_buf */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1820
/*
 * Pull cnt bytes from the data FIFO into buf using 64-bit reads.
 * A trailing partial word is read out anyway and parked via
 * dw_mci_pull_final_bytes() for the next caller to consume.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* buf may be misaligned for u64 access - bounce via aligned_buf */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* read one more FIFO word; leftover bytes go to part_buf */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1852
1853 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1854 {
1855         int len;
1856
1857         /* get remaining partial bytes */
1858         len = dw_mci_pull_part_bytes(host, buf, cnt);
1859         if (unlikely(len == cnt))
1860                 return;
1861         buf += len;
1862         cnt -= len;
1863
1864         /* get the rest of the data */
1865         host->pull_data(host, buf, cnt);
1866 }
1867
/*
 * PIO read path: drain the receive FIFO into the current scatterlist.
 * @dto: true when called from the data-over interrupt, in which case
 *	 draining continues while the FIFO still reports pending words.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO words + staged partial bytes */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* current entry fully consumed - advance for the next IRQ */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* make host->sg = NULL visible before the event bit */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1921
/*
 * PIO write path: feed the transmit FIFO from the current scatterlist,
 * filling up to the free space the controller reports, until the TXDR
 * interrupt is no longer pending.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO bytes, minus bytes already staged */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current entry fully consumed - advance for the next IRQ */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* make host->sg = NULL visible before the event bit */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1975
/*
 * Latch a command-phase interrupt status and schedule the tasklet.
 * Only the first status is kept so a later interrupt does not
 * overwrite the original cause before the tasklet has seen it.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* order the cmd_status store before the completion bit */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1986
/*
 * Top-level interrupt handler: decode MINTSTS, acknowledge each source
 * in RINTSTS, and defer the real work to the tasklet or card workqueue.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();	/* cmd_status before the event bit */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();	/* data_status before the event bit */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* drain whatever is still in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			/* card detect is handled in process context */
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
2088
/*
 * Card-detect workqueue handler: for each slot whose presence state
 * changed, fail any in-flight or queued request with -ENOMEDIUM,
 * reset the controller on removal, and notify the MMC core.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		/* loop until the observed state stops changing under us */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is currently active on the host */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
					case STATE_WAITING_CMD11_DONE:
						break;
					case STATE_SENDING_CMD11:
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was only queued, never started */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* drop lock for the completion callback */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0)
				dw_mci_reset(host);

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
2170
2171 #ifdef CONFIG_OF
2172 /* given a slot id, find out the device node representing that slot */
2173 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2174 {
2175         struct device_node *np;
2176         const __be32 *addr;
2177         int len;
2178
2179         if (!dev || !dev->of_node)
2180                 return NULL;
2181
2182         for_each_child_of_node(dev->of_node, np) {
2183                 addr = of_get_property(np, "reg", &len);
2184                 if (!addr || (len < sizeof(int)))
2185                         continue;
2186                 if (be32_to_cpup(addr) == slot)
2187                         return np;
2188         }
2189         return NULL;
2190 }
2191
/* map of deprecated device-tree slot properties to slot quirk flags */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* corresponding DW_MCI_SLOT_QUIRK_* flag */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2201
2202 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2203 {
2204         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2205         int quirks = 0;
2206         int idx;
2207
2208         /* get quirks */
2209         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2210                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2211                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2212                                         of_slot_quirks[idx].quirk);
2213                         quirks |= of_slot_quirks[idx].id;
2214                 }
2215
2216         return quirks;
2217 }
2218 #else /* CONFIG_OF */
/* stub: slot quirks only exist in the device tree (CONFIG_OF) */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* stub: no device-tree slot nodes without CONFIG_OF */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2227 #endif /* CONFIG_OF */
2228
2229 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2230 {
2231         struct mmc_host *mmc;
2232         struct dw_mci_slot *slot;
2233         const struct dw_mci_drv_data *drv_data = host->drv_data;
2234         int ctrl_id, ret;
2235         u32 freq[2];
2236
2237         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2238         if (!mmc)
2239                 return -ENOMEM;
2240
2241         slot = mmc_priv(mmc);
2242         slot->id = id;
2243         slot->mmc = mmc;
2244         slot->host = host;
2245         host->slot[id] = slot;
2246
2247         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2248
2249         mmc->ops = &dw_mci_ops;
2250         if (of_property_read_u32_array(host->dev->of_node,
2251                                        "clock-freq-min-max", freq, 2)) {
2252                 mmc->f_min = DW_MCI_FREQ_MIN;
2253                 mmc->f_max = DW_MCI_FREQ_MAX;
2254         } else {
2255                 mmc->f_min = freq[0];
2256                 mmc->f_max = freq[1];
2257         }
2258
2259         /*if there are external regulators, get them*/
2260         ret = mmc_regulator_get_supply(mmc);
2261         if (ret == -EPROBE_DEFER)
2262                 goto err_setup_bus;
2263
2264         if (!mmc->ocr_avail)
2265                 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2266
2267         if (host->pdata->caps)
2268                 mmc->caps = host->pdata->caps;
2269
2270         if (host->pdata->pm_caps)
2271                 mmc->pm_caps = host->pdata->pm_caps;
2272
2273         if (host->dev->of_node) {
2274                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2275                 if (ctrl_id < 0)
2276                         ctrl_id = 0;
2277         } else {
2278                 ctrl_id = to_platform_device(host->dev)->id;
2279         }
2280         if (drv_data && drv_data->caps)
2281                 mmc->caps |= drv_data->caps[ctrl_id];
2282
2283         if (host->pdata->caps2)
2284                 mmc->caps2 = host->pdata->caps2;
2285
2286         mmc_of_parse(mmc);
2287
2288         if (host->pdata->blk_settings) {
2289                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2290                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2291                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2292                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2293                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2294         } else {
2295                 /* Useful defaults if platform data is unset. */
2296 #ifdef CONFIG_MMC_DW_IDMAC
2297                 mmc->max_segs = host->ring_size;
2298                 mmc->max_blk_size = 65536;
2299                 mmc->max_blk_count = host->ring_size;
2300                 mmc->max_seg_size = 0x1000;
2301                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2302 #else
2303                 mmc->max_segs = 64;
2304                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2305                 mmc->max_blk_count = 512;
2306                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2307                 mmc->max_seg_size = mmc->max_req_size;
2308 #endif /* CONFIG_MMC_DW_IDMAC */
2309         }
2310
2311         if (dw_mci_get_cd(mmc))
2312                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2313         else
2314                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2315
2316         ret = mmc_add_host(mmc);
2317         if (ret)
2318                 goto err_setup_bus;
2319
2320 #if defined(CONFIG_DEBUG_FS)
2321         dw_mci_init_debugfs(slot);
2322 #endif
2323
2324         /* Card initially undetected */
2325         slot->last_detect_state = 0;
2326
2327         return 0;
2328
2329 err_setup_bus:
2330         mmc_free_host(mmc);
2331         return ret;
2332 }
2333
2334 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2335 {
2336         /* Debugfs stuff is cleaned up by mmc core */
2337         mmc_remove_host(slot->mmc);
2338         slot->host->slot[id] = NULL;
2339         mmc_free_host(slot->mmc);
2340 }
2341
/*
 * Select and initialize a DMA backend for the controller.  Falls back
 * to PIO (host->use_dma = 0) whenever no usable DMA interface exists.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	/* all four callbacks are required for a usable DMA backend */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
2382
2383 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2384 {
2385         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2386         u32 ctrl;
2387
2388         ctrl = mci_readl(host, CTRL);
2389         ctrl |= reset;
2390         mci_writel(host, CTRL, ctrl);
2391
2392         /* wait till resets clear */
2393         do {
2394                 ctrl = mci_readl(host, CTRL);
2395                 if (!(ctrl & reset))
2396                         return true;
2397         } while (time_before(jiffies, timeout));
2398
2399         dev_err(host->dev,
2400                 "Timeout resetting block (ctrl reset %#x)\n",
2401                 ctrl & reset);
2402
2403         return false;
2404 }
2405
/*
 * Full controller recovery reset: CTRL + FIFO (and DMA when in use).
 *
 * Stops any scatter-gather iteration in flight, asserts the reset bits,
 * waits for an in-progress DMA request to drain, resets the FIFO a
 * second time when DMA is used, reprograms the internal DMAC, and
 * finally issues an UPD_CLK command so the CIU re-latches its clock
 * registers.
 *
 * Returns true on success, false if any reset step timed out.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
        u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
        bool ret = false;

        /*
         * Resetting generates a block interrupt, hence setting
         * the scatter-gather pointer to NULL.
         */
        if (host->sg) {
                sg_miter_stop(&host->sg_miter);
                host->sg = NULL;
        }

        if (host->use_dma)
                flags |= SDMMC_CTRL_DMA_RESET;

        if (dw_mci_ctrl_reset(host, flags)) {
                /*
                 * In all cases we clear the RAWINTS register to clear any
                 * interrupts.
                 */
                mci_writel(host, RINTSTS, 0xFFFFFFFF);

                /* if using dma we wait for dma_req to clear */
                if (host->use_dma) {
                        unsigned long timeout = jiffies + msecs_to_jiffies(500);
                        u32 status;
                        /* Poll STATUS until the DMA request line drops. */
                        do {
                                status = mci_readl(host, STATUS);
                                if (!(status & SDMMC_STATUS_DMA_REQ))
                                        break;
                                cpu_relax();
                        } while (time_before(jiffies, timeout));

                        if (status & SDMMC_STATUS_DMA_REQ) {
                                dev_err(host->dev,
                                        "%s: Timeout waiting for dma_req to "
                                        "clear during reset\n", __func__);
                                goto ciu_out;
                        }

                        /* when using DMA next we reset the fifo again */
                        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
                                goto ciu_out;
                }
        } else {
                /* if the controller reset bit did clear, then set clock regs */
                if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
                        dev_err(host->dev, "%s: fifo/dma reset bits didn't "
                                "clear but ciu was reset, doing clock update\n",
                                __func__);
                        goto ciu_out;
                }
        }

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
        /* It is also recommended that we reset and reprogram idmac */
        dw_mci_idmac_reset(host);
#endif

        ret = true;

ciu_out:
        /* After a CTRL reset we need to have CIU set clock registers  */
        mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

        return ret;
}
2475
2476 #ifdef CONFIG_OF
/*
 * Mapping of device-tree boolean properties to driver quirk flags.
 * Each property present in the node ORs the corresponding id into
 * pdata->quirks during DT parsing.
 */
static struct dw_mci_of_quirks {
        char *quirk;    /* device-tree property name */
        int id;         /* DW_MCI_QUIRK_* flag set when the property exists */
} of_quirks[] = {
        {
                .quirk  = "broken-cd",
                .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
        }, {
                .quirk  = "disable-wp",
                .id     = DW_MCI_QUIRK_NO_WRITE_PROTECT,
        },
};
2489
2490 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2491 {
2492         struct dw_mci_board *pdata;
2493         struct device *dev = host->dev;
2494         struct device_node *np = dev->of_node;
2495         const struct dw_mci_drv_data *drv_data = host->drv_data;
2496         int idx, ret;
2497         u32 clock_frequency;
2498
2499         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2500         if (!pdata) {
2501                 dev_err(dev, "could not allocate memory for pdata\n");
2502                 return ERR_PTR(-ENOMEM);
2503         }
2504
2505         /* find out number of slots supported */
2506         if (of_property_read_u32(dev->of_node, "num-slots",
2507                                 &pdata->num_slots)) {
2508                 dev_info(dev, "num-slots property not found, "
2509                                 "assuming 1 slot is available\n");
2510                 pdata->num_slots = 1;
2511         }
2512
2513         /* get quirks */
2514         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2515                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2516                         pdata->quirks |= of_quirks[idx].id;
2517
2518         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2519                 dev_info(dev, "fifo-depth property not found, using "
2520                                 "value of FIFOTH register as default\n");
2521
2522         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2523
2524         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2525                 pdata->bus_hz = clock_frequency;
2526
2527         if (drv_data && drv_data->parse_dt) {
2528                 ret = drv_data->parse_dt(host);
2529                 if (ret)
2530                         return ERR_PTR(ret);
2531         }
2532
2533         if (of_find_property(np, "supports-highspeed", NULL))
2534                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2535
2536         return pdata;
2537 }
2538
2539 #else /* CONFIG_OF */
/* Without OF support there is no DT to parse; callers must supply pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
        return ERR_PTR(-EINVAL);
}
2544 #endif /* CONFIG_OF */
2545
2546 int dw_mci_probe(struct dw_mci *host)
2547 {
2548         const struct dw_mci_drv_data *drv_data = host->drv_data;
2549         int width, i, ret = 0;
2550         u32 fifo_size;
2551         int init_slots = 0;
2552
2553         if (!host->pdata) {
2554                 host->pdata = dw_mci_parse_dt(host);
2555                 if (IS_ERR(host->pdata)) {
2556                         dev_err(host->dev, "platform data not available\n");
2557                         return -EINVAL;
2558                 }
2559         }
2560
2561         if (host->pdata->num_slots > 1) {
2562                 dev_err(host->dev,
2563                         "Platform data must supply num_slots.\n");
2564                 return -ENODEV;
2565         }
2566
2567         host->biu_clk = devm_clk_get(host->dev, "biu");
2568         if (IS_ERR(host->biu_clk)) {
2569                 dev_dbg(host->dev, "biu clock not available\n");
2570         } else {
2571                 ret = clk_prepare_enable(host->biu_clk);
2572                 if (ret) {
2573                         dev_err(host->dev, "failed to enable biu clock\n");
2574                         return ret;
2575                 }
2576         }
2577
2578         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2579         if (IS_ERR(host->ciu_clk)) {
2580                 dev_dbg(host->dev, "ciu clock not available\n");
2581                 host->bus_hz = host->pdata->bus_hz;
2582         } else {
2583                 ret = clk_prepare_enable(host->ciu_clk);
2584                 if (ret) {
2585                         dev_err(host->dev, "failed to enable ciu clock\n");
2586                         goto err_clk_biu;
2587                 }
2588
2589                 if (host->pdata->bus_hz) {
2590                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2591                         if (ret)
2592                                 dev_warn(host->dev,
2593                                          "Unable to set bus rate to %uHz\n",
2594                                          host->pdata->bus_hz);
2595                 }
2596                 host->bus_hz = clk_get_rate(host->ciu_clk);
2597         }
2598
2599         if (!host->bus_hz) {
2600                 dev_err(host->dev,
2601                         "Platform data must supply bus speed\n");
2602                 ret = -ENODEV;
2603                 goto err_clk_ciu;
2604         }
2605
2606         if (drv_data && drv_data->init) {
2607                 ret = drv_data->init(host);
2608                 if (ret) {
2609                         dev_err(host->dev,
2610                                 "implementation specific init failed\n");
2611                         goto err_clk_ciu;
2612                 }
2613         }
2614
2615         if (drv_data && drv_data->setup_clock) {
2616                 ret = drv_data->setup_clock(host);
2617                 if (ret) {
2618                         dev_err(host->dev,
2619                                 "implementation specific clock setup failed\n");
2620                         goto err_clk_ciu;
2621                 }
2622         }
2623
2624         host->quirks = host->pdata->quirks;
2625
2626         spin_lock_init(&host->lock);
2627         INIT_LIST_HEAD(&host->queue);
2628
2629         /*
2630          * Get the host data width - this assumes that HCON has been set with
2631          * the correct values.
2632          */
2633         i = (mci_readl(host, HCON) >> 7) & 0x7;
2634         if (!i) {
2635                 host->push_data = dw_mci_push_data16;
2636                 host->pull_data = dw_mci_pull_data16;
2637                 width = 16;
2638                 host->data_shift = 1;
2639         } else if (i == 2) {
2640                 host->push_data = dw_mci_push_data64;
2641                 host->pull_data = dw_mci_pull_data64;
2642                 width = 64;
2643                 host->data_shift = 3;
2644         } else {
2645                 /* Check for a reserved value, and warn if it is */
2646                 WARN((i != 1),
2647                      "HCON reports a reserved host data width!\n"
2648                      "Defaulting to 32-bit access.\n");
2649                 host->push_data = dw_mci_push_data32;
2650                 host->pull_data = dw_mci_pull_data32;
2651                 width = 32;
2652                 host->data_shift = 2;
2653         }
2654
2655         /* Reset all blocks */
2656         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2657                 return -ENODEV;
2658
2659         host->dma_ops = host->pdata->dma_ops;
2660         dw_mci_init_dma(host);
2661
2662         /* Clear the interrupts for the host controller */
2663         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2664         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2665
2666         /* Put in max timeout */
2667         mci_writel(host, TMOUT, 0xFFFFFFFF);
2668
2669         /*
2670          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2671          *                          Tx Mark = fifo_size / 2 DMA Size = 8
2672          */
2673         if (!host->pdata->fifo_depth) {
2674                 /*
2675                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2676                  * have been overwritten by the bootloader, just like we're
2677                  * about to do, so if you know the value for your hardware, you
2678                  * should put it in the platform data.
2679                  */
2680                 fifo_size = mci_readl(host, FIFOTH);
2681                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2682         } else {
2683                 fifo_size = host->pdata->fifo_depth;
2684         }
2685         host->fifo_depth = fifo_size;
2686         host->fifoth_val =
2687                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2688         mci_writel(host, FIFOTH, host->fifoth_val);
2689
2690         /* disable clock to CIU */
2691         mci_writel(host, CLKENA, 0);
2692         mci_writel(host, CLKSRC, 0);
2693
2694         /*
2695          * In 2.40a spec, Data offset is changed.
2696          * Need to check the version-id and set data-offset for DATA register.
2697          */
2698         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2699         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2700
2701         if (host->verid < DW_MMC_240A)
2702                 host->data_offset = DATA_OFFSET;
2703         else
2704                 host->data_offset = DATA_240A_OFFSET;
2705
2706         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2707         host->card_workqueue = alloc_workqueue("dw-mci-card",
2708                         WQ_MEM_RECLAIM, 1);
2709         if (!host->card_workqueue) {
2710                 ret = -ENOMEM;
2711                 goto err_dmaunmap;
2712         }
2713         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2714         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2715                                host->irq_flags, "dw-mci", host);
2716         if (ret)
2717                 goto err_workqueue;
2718
2719         if (host->pdata->num_slots)
2720                 host->num_slots = host->pdata->num_slots;
2721         else
2722                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2723
2724         /*
2725          * Enable interrupts for command done, data over, data empty, card det,
2726          * receive ready and error such as transmit, receive timeout, crc error
2727          */
2728         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2729         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2730                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2731                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2732         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2733
2734         dev_info(host->dev, "DW MMC controller at irq %d, "
2735                  "%d bit host data width, "
2736                  "%u deep fifo\n",
2737                  host->irq, width, fifo_size);
2738
2739         /* We need at least one slot to succeed */
2740         for (i = 0; i < host->num_slots; i++) {
2741                 ret = dw_mci_init_slot(host, i);
2742                 if (ret)
2743                         dev_dbg(host->dev, "slot %d init failed\n", i);
2744                 else
2745                         init_slots++;
2746         }
2747
2748         if (init_slots) {
2749                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2750         } else {
2751                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2752                                         "but failed on all\n", host->num_slots);
2753                 goto err_workqueue;
2754         }
2755
2756         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2757                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2758
2759         return 0;
2760
2761 err_workqueue:
2762         destroy_workqueue(host->card_workqueue);
2763
2764 err_dmaunmap:
2765         if (host->use_dma && host->dma_ops->exit)
2766                 host->dma_ops->exit(host);
2767
2768 err_clk_ciu:
2769         if (!IS_ERR(host->ciu_clk))
2770                 clk_disable_unprepare(host->ciu_clk);
2771
2772 err_clk_biu:
2773         if (!IS_ERR(host->biu_clk))
2774                 clk_disable_unprepare(host->biu_clk);
2775
2776         return ret;
2777 }
2778 EXPORT_SYMBOL(dw_mci_probe);
2779
2780 void dw_mci_remove(struct dw_mci *host)
2781 {
2782         int i;
2783
2784         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2785         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2786
2787         for (i = 0; i < host->num_slots; i++) {
2788                 dev_dbg(host->dev, "remove slot %d\n", i);
2789                 if (host->slot[i])
2790                         dw_mci_cleanup_slot(host->slot[i], i);
2791         }
2792
2793         /* disable clock to CIU */
2794         mci_writel(host, CLKENA, 0);
2795         mci_writel(host, CLKSRC, 0);
2796
2797         destroy_workqueue(host->card_workqueue);
2798
2799         if (host->use_dma && host->dma_ops->exit)
2800                 host->dma_ops->exit(host);
2801
2802         if (!IS_ERR(host->ciu_clk))
2803                 clk_disable_unprepare(host->ciu_clk);
2804
2805         if (!IS_ERR(host->biu_clk))
2806                 clk_disable_unprepare(host->biu_clk);
2807 }
2808 EXPORT_SYMBOL(dw_mci_remove);
2809
2810
2811
2812 #ifdef CONFIG_PM_SLEEP
2813 /*
2814  * TODO: we should probably disable the clock to the card in the suspend path.
2815  */
2816 int dw_mci_suspend(struct dw_mci *host)
2817 {
2818         return 0;
2819 }
2820 EXPORT_SYMBOL(dw_mci_suspend);
2821
/*
 * System-sleep resume hook: bring the controller back from an unknown
 * power state.
 *
 * Resets the whole IP, re-initializes DMA, restores the FIFO
 * threshold, timeout and interrupt masks cached/configured at probe
 * time, and re-applies ios/bus settings for slots that kept power over
 * suspend.
 *
 * Returns 0 on success, -ENODEV if the controller fails to reset.
 */
int dw_mci_resume(struct dw_mci *host)
{
        int i, ret;

        /* Full CTRL/FIFO/DMA reset; bail out if the IP does not respond. */
        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
                ret = -ENODEV;
                return ret;
        }

        if (host->use_dma && host->dma_ops->init)
                host->dma_ops->init(host);

        /*
         * Restore the initial value at FIFOTH register
         * And Invalidate the prev_blksz with zero
         */
        mci_writel(host, FIFOTH, host->fifoth_val);
        host->prev_blksz = 0;

        /* Put in max timeout */
        mci_writel(host, TMOUT, 0xFFFFFFFF);

        /* Re-enable the same interrupt set that probe configured. */
        mci_writel(host, RINTSTS, 0xFFFFFFFF);
        mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
                   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
                   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
        mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

        /* Slots that kept power need their ios/bus settings re-applied. */
        for (i = 0; i < host->num_slots; i++) {
                struct dw_mci_slot *slot = host->slot[i];
                if (!slot)
                        continue;
                if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
                        dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
                        dw_mci_setup_bus(slot, true);
                }
        }
        return 0;
}
2861 EXPORT_SYMBOL(dw_mci_resume);
2862 #endif /* CONFIG_PM_SLEEP */
2863
/* Module load: just announce the driver; hosts attach via dw_mci_probe(). */
static int __init dw_mci_init(void)
{
        pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
        return 0;
}
2869
/* Module unload: nothing global to release; hosts clean up in dw_mci_remove(). */
static void __exit dw_mci_exit(void)
{
}
2873
2874 module_init(dw_mci_init);
2875 module_exit(dw_mci_exit);
2876
2877 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2878 MODULE_AUTHOR("NXP Semiconductor VietNam");
2879 MODULE_AUTHOR("Imagination Technologies Ltd");
2880 MODULE_LICENSE("GPL v2");