mmc: dw_mmc: Make sure we don't get stuck when we get an error
[cascardo/linux.git] / drivers / mmc / host / dw_mmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
37 #include <linux/of.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
40
41 #include "dw_mmc.h"
42
43 /* Common flag combinations */
44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45                                  SDMMC_INT_HTO | SDMMC_INT_SBE  | \
46                                  SDMMC_INT_EBE)
47 #define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48                                  SDMMC_INT_RESP_ERR)
49 #define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
50                                  DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
51 #define DW_MCI_SEND_STATUS      1
52 #define DW_MCI_RECV_STATUS      2
53 #define DW_MCI_DMA_THRESHOLD    16
54
55 #define DW_MCI_FREQ_MAX 200000000       /* unit: HZ */
56 #define DW_MCI_FREQ_MIN 400000          /* unit: HZ */
57
58 #ifdef CONFIG_MMC_DW_IDMAC
59 #define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60                                  SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61                                  SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62                                  SDMMC_IDMAC_INT_TI)
63
64 struct idmac_desc {
65         u32             des0;   /* Control Descriptor */
66 #define IDMAC_DES0_DIC  BIT(1)
67 #define IDMAC_DES0_LD   BIT(2)
68 #define IDMAC_DES0_FD   BIT(3)
69 #define IDMAC_DES0_CH   BIT(4)
70 #define IDMAC_DES0_ER   BIT(5)
71 #define IDMAC_DES0_CES  BIT(30)
72 #define IDMAC_DES0_OWN  BIT(31)
73
74         u32             des1;   /* Buffer sizes */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76         ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
77
78         u32             des2;   /* buffer 1 physical address */
79
80         u32             des3;   /* buffer 2 physical address */
81 };
82 #endif /* CONFIG_MMC_DW_IDMAC */
83
84 static const u8 tuning_blk_pattern_4bit[] = {
85         0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86         0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87         0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88         0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89         0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90         0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91         0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92         0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93 };
94
95 static const u8 tuning_blk_pattern_8bit[] = {
96         0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97         0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98         0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99         0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100         0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101         0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102         0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103         0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104         0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105         0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106         0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107         0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108         0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109         0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110         0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111         0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
112 };
113
114 static bool dw_mci_reset(struct dw_mci *host);
115
116 #if defined(CONFIG_DEBUG_FS)
117 static int dw_mci_req_show(struct seq_file *s, void *v)
118 {
119         struct dw_mci_slot *slot = s->private;
120         struct mmc_request *mrq;
121         struct mmc_command *cmd;
122         struct mmc_command *stop;
123         struct mmc_data *data;
124
125         /* Make sure we get a consistent snapshot */
126         spin_lock_bh(&slot->host->lock);
127         mrq = slot->mrq;
128
129         if (mrq) {
130                 cmd = mrq->cmd;
131                 data = mrq->data;
132                 stop = mrq->stop;
133
134                 if (cmd)
135                         seq_printf(s,
136                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137                                    cmd->opcode, cmd->arg, cmd->flags,
138                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
139                                    cmd->resp[2], cmd->error);
140                 if (data)
141                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142                                    data->bytes_xfered, data->blocks,
143                                    data->blksz, data->flags, data->error);
144                 if (stop)
145                         seq_printf(s,
146                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147                                    stop->opcode, stop->arg, stop->flags,
148                                    stop->resp[0], stop->resp[1], stop->resp[2],
149                                    stop->resp[2], stop->error);
150         }
151
152         spin_unlock_bh(&slot->host->lock);
153
154         return 0;
155 }
156
157 static int dw_mci_req_open(struct inode *inode, struct file *file)
158 {
159         return single_open(file, dw_mci_req_show, inode->i_private);
160 }
161
162 static const struct file_operations dw_mci_req_fops = {
163         .owner          = THIS_MODULE,
164         .open           = dw_mci_req_open,
165         .read           = seq_read,
166         .llseek         = seq_lseek,
167         .release        = single_release,
168 };
169
170 static int dw_mci_regs_show(struct seq_file *s, void *v)
171 {
172         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
178
179         return 0;
180 }
181
182 static int dw_mci_regs_open(struct inode *inode, struct file *file)
183 {
184         return single_open(file, dw_mci_regs_show, inode->i_private);
185 }
186
187 static const struct file_operations dw_mci_regs_fops = {
188         .owner          = THIS_MODULE,
189         .open           = dw_mci_regs_open,
190         .read           = seq_read,
191         .llseek         = seq_lseek,
192         .release        = single_release,
193 };
194
195 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
196 {
197         struct mmc_host *mmc = slot->mmc;
198         struct dw_mci *host = slot->host;
199         struct dentry *root;
200         struct dentry *node;
201
202         root = mmc->debugfs_root;
203         if (!root)
204                 return;
205
206         node = debugfs_create_file("regs", S_IRUSR, root, host,
207                                    &dw_mci_regs_fops);
208         if (!node)
209                 goto err;
210
211         node = debugfs_create_file("req", S_IRUSR, root, slot,
212                                    &dw_mci_req_fops);
213         if (!node)
214                 goto err;
215
216         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
217         if (!node)
218                 goto err;
219
220         node = debugfs_create_x32("pending_events", S_IRUSR, root,
221                                   (u32 *)&host->pending_events);
222         if (!node)
223                 goto err;
224
225         node = debugfs_create_x32("completed_events", S_IRUSR, root,
226                                   (u32 *)&host->completed_events);
227         if (!node)
228                 goto err;
229
230         return;
231
232 err:
233         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
234 }
235 #endif /* defined(CONFIG_DEBUG_FS) */
236
237 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
238 {
239         struct mmc_data *data;
240         struct dw_mci_slot *slot = mmc_priv(mmc);
241         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
242         u32 cmdr;
243         cmd->error = -EINPROGRESS;
244
245         cmdr = cmd->opcode;
246
247         if (cmd->opcode == MMC_STOP_TRANSMISSION ||
248             cmd->opcode == MMC_GO_IDLE_STATE ||
249             cmd->opcode == MMC_GO_INACTIVE_STATE ||
250             (cmd->opcode == SD_IO_RW_DIRECT &&
251              ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
252                 cmdr |= SDMMC_CMD_STOP;
253         else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
254                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
255
256         if (cmd->flags & MMC_RSP_PRESENT) {
257                 /* We expect a response, so set this bit */
258                 cmdr |= SDMMC_CMD_RESP_EXP;
259                 if (cmd->flags & MMC_RSP_136)
260                         cmdr |= SDMMC_CMD_RESP_LONG;
261         }
262
263         if (cmd->flags & MMC_RSP_CRC)
264                 cmdr |= SDMMC_CMD_RESP_CRC;
265
266         data = cmd->data;
267         if (data) {
268                 cmdr |= SDMMC_CMD_DAT_EXP;
269                 if (data->flags & MMC_DATA_STREAM)
270                         cmdr |= SDMMC_CMD_STRM_MODE;
271                 if (data->flags & MMC_DATA_WRITE)
272                         cmdr |= SDMMC_CMD_DAT_WR;
273         }
274
275         if (drv_data && drv_data->prepare_command)
276                 drv_data->prepare_command(slot->host, &cmdr);
277
278         return cmdr;
279 }
280
281 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
282 {
283         struct mmc_command *stop;
284         u32 cmdr;
285
286         if (!cmd->data)
287                 return 0;
288
289         stop = &host->stop_abort;
290         cmdr = cmd->opcode;
291         memset(stop, 0, sizeof(struct mmc_command));
292
293         if (cmdr == MMC_READ_SINGLE_BLOCK ||
294             cmdr == MMC_READ_MULTIPLE_BLOCK ||
295             cmdr == MMC_WRITE_BLOCK ||
296             cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
297                 stop->opcode = MMC_STOP_TRANSMISSION;
298                 stop->arg = 0;
299                 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
300         } else if (cmdr == SD_IO_RW_EXTENDED) {
301                 stop->opcode = SD_IO_RW_DIRECT;
302                 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
303                              ((cmd->arg >> 28) & 0x7);
304                 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
305         } else {
306                 return 0;
307         }
308
309         cmdr = stop->opcode | SDMMC_CMD_STOP |
310                 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
311
312         return cmdr;
313 }
314
315 static void dw_mci_start_command(struct dw_mci *host,
316                                  struct mmc_command *cmd, u32 cmd_flags)
317 {
318         host->cmd = cmd;
319         dev_vdbg(host->dev,
320                  "start command: ARGR=0x%08x CMDR=0x%08x\n",
321                  cmd->arg, cmd_flags);
322
323         mci_writel(host, CMDARG, cmd->arg);
324         wmb();
325
326         mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
327 }
328
329 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
330 {
331         struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
332         dw_mci_start_command(host, stop, host->stop_cmdr);
333 }
334
335 /* DMA interface functions */
336 static void dw_mci_stop_dma(struct dw_mci *host)
337 {
338         if (host->using_dma) {
339                 host->dma_ops->stop(host);
340                 host->dma_ops->cleanup(host);
341         }
342
343         /* Data transfer was stopped by the interrupt handler */
344         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
345 }
346
347 static int dw_mci_get_dma_dir(struct mmc_data *data)
348 {
349         if (data->flags & MMC_DATA_WRITE)
350                 return DMA_TO_DEVICE;
351         else
352                 return DMA_FROM_DEVICE;
353 }
354
355 #ifdef CONFIG_MMC_DW_IDMAC
356 static void dw_mci_dma_cleanup(struct dw_mci *host)
357 {
358         struct mmc_data *data = host->data;
359
360         if (data)
361                 if (!data->host_cookie)
362                         dma_unmap_sg(host->dev,
363                                      data->sg,
364                                      data->sg_len,
365                                      dw_mci_get_dma_dir(data));
366 }
367
368 static void dw_mci_idmac_reset(struct dw_mci *host)
369 {
370         u32 bmod = mci_readl(host, BMOD);
371         /* Software reset of DMA */
372         bmod |= SDMMC_IDMAC_SWRESET;
373         mci_writel(host, BMOD, bmod);
374 }
375
376 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
377 {
378         u32 temp;
379
380         /* Disable and reset the IDMAC interface */
381         temp = mci_readl(host, CTRL);
382         temp &= ~SDMMC_CTRL_USE_IDMAC;
383         temp |= SDMMC_CTRL_DMA_RESET;
384         mci_writel(host, CTRL, temp);
385
386         /* Stop the IDMAC running */
387         temp = mci_readl(host, BMOD);
388         temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
389         temp |= SDMMC_IDMAC_SWRESET;
390         mci_writel(host, BMOD, temp);
391 }
392
393 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
394 {
395         struct mmc_data *data = host->data;
396
397         dev_vdbg(host->dev, "DMA complete\n");
398
399         host->dma_ops->cleanup(host);
400
401         /*
402          * If the card was removed, data will be NULL. No point in trying to
403          * send the stop command or waiting for NBUSY in this case.
404          */
405         if (data) {
406                 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
407                 tasklet_schedule(&host->tasklet);
408         }
409 }
410
411 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
412                                     unsigned int sg_len)
413 {
414         int i;
415         struct idmac_desc *desc = host->sg_cpu;
416
417         for (i = 0; i < sg_len; i++, desc++) {
418                 unsigned int length = sg_dma_len(&data->sg[i]);
419                 u32 mem_addr = sg_dma_address(&data->sg[i]);
420
421                 /* Set the OWN bit and disable interrupts for this descriptor */
422                 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
423
424                 /* Buffer length */
425                 IDMAC_SET_BUFFER1_SIZE(desc, length);
426
427                 /* Physical address to DMA to/from */
428                 desc->des2 = mem_addr;
429         }
430
431         /* Set first descriptor */
432         desc = host->sg_cpu;
433         desc->des0 |= IDMAC_DES0_FD;
434
435         /* Set last descriptor */
436         desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
437         desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
438         desc->des0 |= IDMAC_DES0_LD;
439
440         wmb();
441 }
442
443 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
444 {
445         u32 temp;
446
447         dw_mci_translate_sglist(host, host->data, sg_len);
448
449         /* Select IDMAC interface */
450         temp = mci_readl(host, CTRL);
451         temp |= SDMMC_CTRL_USE_IDMAC;
452         mci_writel(host, CTRL, temp);
453
454         wmb();
455
456         /* Enable the IDMAC */
457         temp = mci_readl(host, BMOD);
458         temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
459         mci_writel(host, BMOD, temp);
460
461         /* Start it running */
462         mci_writel(host, PLDMND, 1);
463 }
464
465 static int dw_mci_idmac_init(struct dw_mci *host)
466 {
467         struct idmac_desc *p;
468         int i;
469
470         /* Number of descriptors in the ring buffer */
471         host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
472
473         /* Forward link the descriptor list */
474         for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
475                 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
476
477         /* Set the last descriptor as the end-of-ring descriptor */
478         p->des3 = host->sg_dma;
479         p->des0 = IDMAC_DES0_ER;
480
481         dw_mci_idmac_reset(host);
482
483         /* Mask out interrupts - get Tx & Rx complete only */
484         mci_writel(host, IDSTS, IDMAC_INT_CLR);
485         mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
486                    SDMMC_IDMAC_INT_TI);
487
488         /* Set the descriptor base address */
489         mci_writel(host, DBADDR, host->sg_dma);
490         return 0;
491 }
492
493 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
494         .init = dw_mci_idmac_init,
495         .start = dw_mci_idmac_start_dma,
496         .stop = dw_mci_idmac_stop_dma,
497         .complete = dw_mci_idmac_complete_dma,
498         .cleanup = dw_mci_dma_cleanup,
499 };
500 #endif /* CONFIG_MMC_DW_IDMAC */
501
502 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
503                                    struct mmc_data *data,
504                                    bool next)
505 {
506         struct scatterlist *sg;
507         unsigned int i, sg_len;
508
509         if (!next && data->host_cookie)
510                 return data->host_cookie;
511
512         /*
513          * We don't do DMA on "complex" transfers, i.e. with
514          * non-word-aligned buffers or lengths. Also, we don't bother
515          * with all the DMA setup overhead for short transfers.
516          */
517         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
518                 return -EINVAL;
519
520         if (data->blksz & 3)
521                 return -EINVAL;
522
523         for_each_sg(data->sg, sg, data->sg_len, i) {
524                 if (sg->offset & 3 || sg->length & 3)
525                         return -EINVAL;
526         }
527
528         sg_len = dma_map_sg(host->dev,
529                             data->sg,
530                             data->sg_len,
531                             dw_mci_get_dma_dir(data));
532         if (sg_len == 0)
533                 return -EINVAL;
534
535         if (next)
536                 data->host_cookie = sg_len;
537
538         return sg_len;
539 }
540
541 static void dw_mci_pre_req(struct mmc_host *mmc,
542                            struct mmc_request *mrq,
543                            bool is_first_req)
544 {
545         struct dw_mci_slot *slot = mmc_priv(mmc);
546         struct mmc_data *data = mrq->data;
547
548         if (!slot->host->use_dma || !data)
549                 return;
550
551         if (data->host_cookie) {
552                 data->host_cookie = 0;
553                 return;
554         }
555
556         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
557                 data->host_cookie = 0;
558 }
559
560 static void dw_mci_post_req(struct mmc_host *mmc,
561                             struct mmc_request *mrq,
562                             int err)
563 {
564         struct dw_mci_slot *slot = mmc_priv(mmc);
565         struct mmc_data *data = mrq->data;
566
567         if (!slot->host->use_dma || !data)
568                 return;
569
570         if (data->host_cookie)
571                 dma_unmap_sg(slot->host->dev,
572                              data->sg,
573                              data->sg_len,
574                              dw_mci_get_dma_dir(data));
575         data->host_cookie = 0;
576 }
577
578 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
579 {
580 #ifdef CONFIG_MMC_DW_IDMAC
581         unsigned int blksz = data->blksz;
582         const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
583         u32 fifo_width = 1 << host->data_shift;
584         u32 blksz_depth = blksz / fifo_width, fifoth_val;
585         u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
586         int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
587
588         tx_wmark = (host->fifo_depth) / 2;
589         tx_wmark_invers = host->fifo_depth - tx_wmark;
590
591         /*
592          * MSIZE is '1',
593          * if blksz is not a multiple of the FIFO width
594          */
595         if (blksz % fifo_width) {
596                 msize = 0;
597                 rx_wmark = 1;
598                 goto done;
599         }
600
601         do {
602                 if (!((blksz_depth % mszs[idx]) ||
603                      (tx_wmark_invers % mszs[idx]))) {
604                         msize = idx;
605                         rx_wmark = mszs[idx] - 1;
606                         break;
607                 }
608         } while (--idx > 0);
609         /*
610          * If idx is '0', it won't be tried
611          * Thus, initial values are uesed
612          */
613 done:
614         fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
615         mci_writel(host, FIFOTH, fifoth_val);
616 #endif
617 }
618
619 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
620 {
621         unsigned int blksz = data->blksz;
622         u32 blksz_depth, fifo_depth;
623         u16 thld_size;
624
625         WARN_ON(!(data->flags & MMC_DATA_READ));
626
627         if (host->timing != MMC_TIMING_MMC_HS200 &&
628             host->timing != MMC_TIMING_UHS_SDR104)
629                 goto disable;
630
631         blksz_depth = blksz / (1 << host->data_shift);
632         fifo_depth = host->fifo_depth;
633
634         if (blksz_depth > fifo_depth)
635                 goto disable;
636
637         /*
638          * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
639          * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
640          * Currently just choose blksz.
641          */
642         thld_size = blksz;
643         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
644         return;
645
646 disable:
647         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
648 }
649
650 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
651 {
652         int sg_len;
653         u32 temp;
654
655         host->using_dma = 0;
656
657         /* If we don't have a channel, we can't do DMA */
658         if (!host->use_dma)
659                 return -ENODEV;
660
661         sg_len = dw_mci_pre_dma_transfer(host, data, 0);
662         if (sg_len < 0) {
663                 host->dma_ops->stop(host);
664                 return sg_len;
665         }
666
667         host->using_dma = 1;
668
669         dev_vdbg(host->dev,
670                  "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
671                  (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
672                  sg_len);
673
674         /*
675          * Decide the MSIZE and RX/TX Watermark.
676          * If current block size is same with previous size,
677          * no need to update fifoth.
678          */
679         if (host->prev_blksz != data->blksz)
680                 dw_mci_adjust_fifoth(host, data);
681
682         /* Enable the DMA interface */
683         temp = mci_readl(host, CTRL);
684         temp |= SDMMC_CTRL_DMA_ENABLE;
685         mci_writel(host, CTRL, temp);
686
687         /* Disable RX/TX IRQs, let DMA handle it */
688         temp = mci_readl(host, INTMASK);
689         temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
690         mci_writel(host, INTMASK, temp);
691
692         host->dma_ops->start(host, sg_len);
693
694         return 0;
695 }
696
697 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
698 {
699         u32 temp;
700
701         data->error = -EINPROGRESS;
702
703         WARN_ON(host->data);
704         host->sg = NULL;
705         host->data = data;
706
707         if (data->flags & MMC_DATA_READ) {
708                 host->dir_status = DW_MCI_RECV_STATUS;
709                 dw_mci_ctrl_rd_thld(host, data);
710         } else {
711                 host->dir_status = DW_MCI_SEND_STATUS;
712         }
713
714         if (dw_mci_submit_data_dma(host, data)) {
715                 int flags = SG_MITER_ATOMIC;
716                 if (host->data->flags & MMC_DATA_READ)
717                         flags |= SG_MITER_TO_SG;
718                 else
719                         flags |= SG_MITER_FROM_SG;
720
721                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
722                 host->sg = data->sg;
723                 host->part_buf_start = 0;
724                 host->part_buf_count = 0;
725
726                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
727                 temp = mci_readl(host, INTMASK);
728                 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
729                 mci_writel(host, INTMASK, temp);
730
731                 temp = mci_readl(host, CTRL);
732                 temp &= ~SDMMC_CTRL_DMA_ENABLE;
733                 mci_writel(host, CTRL, temp);
734
735                 /*
736                  * Use the initial fifoth_val for PIO mode.
737                  * If next issued data may be transfered by DMA mode,
738                  * prev_blksz should be invalidated.
739                  */
740                 mci_writel(host, FIFOTH, host->fifoth_val);
741                 host->prev_blksz = 0;
742         } else {
743                 /*
744                  * Keep the current block size.
745                  * It will be used to decide whether to update
746                  * fifoth register next time.
747                  */
748                 host->prev_blksz = data->blksz;
749         }
750 }
751
752 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
753 {
754         struct dw_mci *host = slot->host;
755         unsigned long timeout = jiffies + msecs_to_jiffies(500);
756         unsigned int cmd_status = 0;
757
758         mci_writel(host, CMDARG, arg);
759         wmb();
760         mci_writel(host, CMD, SDMMC_CMD_START | cmd);
761
762         while (time_before(jiffies, timeout)) {
763                 cmd_status = mci_readl(host, CMD);
764                 if (!(cmd_status & SDMMC_CMD_START))
765                         return;
766         }
767         dev_err(&slot->mmc->class_dev,
768                 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
769                 cmd, arg, cmd_status);
770 }
771
772 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
773 {
774         struct dw_mci *host = slot->host;
775         unsigned int clock = slot->clock;
776         u32 div;
777         u32 clk_en_a;
778
779         if (!clock) {
780                 mci_writel(host, CLKENA, 0);
781                 mci_send_cmd(slot,
782                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
783         } else if (clock != host->current_speed || force_clkinit) {
784                 div = host->bus_hz / clock;
785                 if (host->bus_hz % clock && host->bus_hz > clock)
786                         /*
787                          * move the + 1 after the divide to prevent
788                          * over-clocking the card.
789                          */
790                         div += 1;
791
792                 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
793
794                 if ((clock << div) != slot->__clk_old || force_clkinit)
795                         dev_info(&slot->mmc->class_dev,
796                                  "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
797                                  slot->id, host->bus_hz, clock,
798                                  div ? ((host->bus_hz / div) >> 1) :
799                                  host->bus_hz, div);
800
801                 /* disable clock */
802                 mci_writel(host, CLKENA, 0);
803                 mci_writel(host, CLKSRC, 0);
804
805                 /* inform CIU */
806                 mci_send_cmd(slot,
807                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
808
809                 /* set clock to desired speed */
810                 mci_writel(host, CLKDIV, div);
811
812                 /* inform CIU */
813                 mci_send_cmd(slot,
814                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
815
816                 /* enable clock; only low power if no SDIO */
817                 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
818                 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
819                         clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
820                 mci_writel(host, CLKENA, clk_en_a);
821
822                 /* inform CIU */
823                 mci_send_cmd(slot,
824                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
825
826                 /* keep the clock with reflecting clock dividor */
827                 slot->__clk_old = clock << div;
828         }
829
830         host->current_speed = clock;
831
832         /* Set the current slot bus width */
833         mci_writel(host, CTYPE, (slot->ctype << slot->id));
834 }
835
836 static void __dw_mci_start_request(struct dw_mci *host,
837                                    struct dw_mci_slot *slot,
838                                    struct mmc_command *cmd)
839 {
840         struct mmc_request *mrq;
841         struct mmc_data *data;
842         u32 cmdflags;
843
844         mrq = slot->mrq;
845
846         host->cur_slot = slot;
847         host->mrq = mrq;
848
849         host->pending_events = 0;
850         host->completed_events = 0;
851         host->cmd_status = 0;
852         host->data_status = 0;
853         host->dir_status = 0;
854
855         data = cmd->data;
856         if (data) {
857                 mci_writel(host, TMOUT, 0xFFFFFFFF);
858                 mci_writel(host, BYTCNT, data->blksz*data->blocks);
859                 mci_writel(host, BLKSIZ, data->blksz);
860         }
861
862         cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
863
864         /* this is the first command, send the initialization clock */
865         if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
866                 cmdflags |= SDMMC_CMD_INIT;
867
868         if (data) {
869                 dw_mci_submit_data(host, data);
870                 wmb();
871         }
872
873         dw_mci_start_command(host, cmd, cmdflags);
874
875         if (mrq->stop)
876                 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
877         else
878                 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
879 }
880
881 static void dw_mci_start_request(struct dw_mci *host,
882                                  struct dw_mci_slot *slot)
883 {
884         struct mmc_request *mrq = slot->mrq;
885         struct mmc_command *cmd;
886
887         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
888         __dw_mci_start_request(host, slot, cmd);
889 }
890
891 /* must be called with host->lock held */
892 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
893                                  struct mmc_request *mrq)
894 {
895         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
896                  host->state);
897
898         slot->mrq = mrq;
899
900         if (host->state == STATE_IDLE) {
901                 host->state = STATE_SENDING_CMD;
902                 dw_mci_start_request(host, slot);
903         } else {
904                 list_add_tail(&slot->queue_node, &host->queue);
905         }
906 }
907
908 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
909 {
910         struct dw_mci_slot *slot = mmc_priv(mmc);
911         struct dw_mci *host = slot->host;
912
913         WARN_ON(slot->mrq);
914
915         /*
916          * The check for card presence and queueing of the request must be
917          * atomic, otherwise the card could be removed in between and the
918          * request wouldn't fail until another card was inserted.
919          */
920         spin_lock_bh(&host->lock);
921
922         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
923                 spin_unlock_bh(&host->lock);
924                 mrq->cmd->error = -ENOMEDIUM;
925                 mmc_request_done(mmc, mrq);
926                 return;
927         }
928
929         dw_mci_queue_request(host, slot, mrq);
930
931         spin_unlock_bh(&host->lock);
932 }
933
934 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
935 {
936         struct dw_mci_slot *slot = mmc_priv(mmc);
937         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
938         u32 regs;
939
940         switch (ios->bus_width) {
941         case MMC_BUS_WIDTH_4:
942                 slot->ctype = SDMMC_CTYPE_4BIT;
943                 break;
944         case MMC_BUS_WIDTH_8:
945                 slot->ctype = SDMMC_CTYPE_8BIT;
946                 break;
947         default:
948                 /* set default 1 bit mode */
949                 slot->ctype = SDMMC_CTYPE_1BIT;
950         }
951
952         regs = mci_readl(slot->host, UHS_REG);
953
954         /* DDR mode set */
955         if (ios->timing == MMC_TIMING_MMC_DDR52)
956                 regs |= ((0x1 << slot->id) << 16);
957         else
958                 regs &= ~((0x1 << slot->id) << 16);
959
960         mci_writel(slot->host, UHS_REG, regs);
961         slot->host->timing = ios->timing;
962
963         /*
964          * Use mirror of ios->clock to prevent race with mmc
965          * core ios update when finding the minimum.
966          */
967         slot->clock = ios->clock;
968
969         if (drv_data && drv_data->set_ios)
970                 drv_data->set_ios(slot->host, ios);
971
972         /* Slot specific timing and width adjustment */
973         dw_mci_setup_bus(slot, false);
974
975         switch (ios->power_mode) {
976         case MMC_POWER_UP:
977                 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
978                 regs = mci_readl(slot->host, PWREN);
979                 regs |= (1 << slot->id);
980                 mci_writel(slot->host, PWREN, regs);
981                 break;
982         case MMC_POWER_OFF:
983                 regs = mci_readl(slot->host, PWREN);
984                 regs &= ~(1 << slot->id);
985                 mci_writel(slot->host, PWREN, regs);
986                 break;
987         default:
988                 break;
989         }
990 }
991
992 static int dw_mci_get_ro(struct mmc_host *mmc)
993 {
994         int read_only;
995         struct dw_mci_slot *slot = mmc_priv(mmc);
996         int gpio_ro = mmc_gpio_get_ro(mmc);
997
998         /* Use platform get_ro function, else try on board write protect */
999         if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1000                         (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1001                 read_only = 0;
1002         else if (!IS_ERR_VALUE(gpio_ro))
1003                 read_only = gpio_ro;
1004         else
1005                 read_only =
1006                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007
1008         dev_dbg(&mmc->class_dev, "card is %s\n",
1009                 read_only ? "read-only" : "read-write");
1010
1011         return read_only;
1012 }
1013
1014 static int dw_mci_get_cd(struct mmc_host *mmc)
1015 {
1016         int present;
1017         struct dw_mci_slot *slot = mmc_priv(mmc);
1018         struct dw_mci_board *brd = slot->host->pdata;
1019         struct dw_mci *host = slot->host;
1020         int gpio_cd = mmc_gpio_get_cd(mmc);
1021
1022         /* Use platform get_cd function, else try onboard card detect */
1023         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024                 present = 1;
1025         else if (!IS_ERR_VALUE(gpio_cd))
1026                 present = gpio_cd;
1027         else
1028                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1029                         == 0 ? 1 : 0;
1030
1031         spin_lock_bh(&host->lock);
1032         if (present) {
1033                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1034                 dev_dbg(&mmc->class_dev, "card is present\n");
1035         } else {
1036                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1037                 dev_dbg(&mmc->class_dev, "card is not present\n");
1038         }
1039         spin_unlock_bh(&host->lock);
1040
1041         return present;
1042 }
1043
1044 /*
1045  * Disable lower power mode.
1046  *
1047  * Low power mode will stop the card clock when idle.  According to the
1048  * description of the CLKENA register we should disable low power mode
1049  * for SDIO cards if we need SDIO interrupts to work.
1050  *
1051  * This function is fast if low power mode is already disabled.
1052  */
1053 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1054 {
1055         struct dw_mci *host = slot->host;
1056         u32 clk_en_a;
1057         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1058
1059         clk_en_a = mci_readl(host, CLKENA);
1060
1061         if (clk_en_a & clken_low_pwr) {
1062                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1065         }
1066 }
1067
1068 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069 {
1070         struct dw_mci_slot *slot = mmc_priv(mmc);
1071         struct dw_mci *host = slot->host;
1072         u32 int_mask;
1073
1074         /* Enable/disable Slot Specific SDIO interrupt */
1075         int_mask = mci_readl(host, INTMASK);
1076         if (enb) {
1077                 /*
1078                  * Turn off low power mode if it was enabled.  This is a bit of
1079                  * a heavy operation and we disable / enable IRQs a lot, so
1080                  * we'll leave low power mode disabled and it will get
1081                  * re-enabled again in dw_mci_setup_bus().
1082                  */
1083                 dw_mci_disable_low_power(slot);
1084
1085                 mci_writel(host, INTMASK,
1086                            (int_mask | SDMMC_INT_SDIO(slot->id)));
1087         } else {
1088                 mci_writel(host, INTMASK,
1089                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1090         }
1091 }
1092
1093 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094 {
1095         struct dw_mci_slot *slot = mmc_priv(mmc);
1096         struct dw_mci *host = slot->host;
1097         const struct dw_mci_drv_data *drv_data = host->drv_data;
1098         struct dw_mci_tuning_data tuning_data;
1099         int err = -ENOSYS;
1100
1101         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1108                 } else {
1109                         return -EINVAL;
1110                 }
1111         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1114         } else {
1115                 dev_err(host->dev,
1116                         "Undefined command(%d) for tuning\n", opcode);
1117                 return -EINVAL;
1118         }
1119
1120         if (drv_data && drv_data->execute_tuning)
1121                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1122         return err;
1123 }
1124
1125 static const struct mmc_host_ops dw_mci_ops = {
1126         .request                = dw_mci_request,
1127         .pre_req                = dw_mci_pre_req,
1128         .post_req               = dw_mci_post_req,
1129         .set_ios                = dw_mci_set_ios,
1130         .get_ro                 = dw_mci_get_ro,
1131         .get_cd                 = dw_mci_get_cd,
1132         .enable_sdio_irq        = dw_mci_enable_sdio_irq,
1133         .execute_tuning         = dw_mci_execute_tuning,
1134 };
1135
1136 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1137         __releases(&host->lock)
1138         __acquires(&host->lock)
1139 {
1140         struct dw_mci_slot *slot;
1141         struct mmc_host *prev_mmc = host->cur_slot->mmc;
1142
1143         WARN_ON(host->cmd || host->data);
1144
1145         host->cur_slot->mrq = NULL;
1146         host->mrq = NULL;
1147         if (!list_empty(&host->queue)) {
1148                 slot = list_entry(host->queue.next,
1149                                   struct dw_mci_slot, queue_node);
1150                 list_del(&slot->queue_node);
1151                 dev_vdbg(host->dev, "list not empty: %s is next\n",
1152                          mmc_hostname(slot->mmc));
1153                 host->state = STATE_SENDING_CMD;
1154                 dw_mci_start_request(host, slot);
1155         } else {
1156                 dev_vdbg(host->dev, "list empty\n");
1157                 host->state = STATE_IDLE;
1158         }
1159
1160         spin_unlock(&host->lock);
1161         mmc_request_done(prev_mmc, mrq);
1162         spin_lock(&host->lock);
1163 }
1164
1165 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1166 {
1167         u32 status = host->cmd_status;
1168
1169         host->cmd_status = 0;
1170
1171         /* Read the response from the card (up to 16 bytes) */
1172         if (cmd->flags & MMC_RSP_PRESENT) {
1173                 if (cmd->flags & MMC_RSP_136) {
1174                         cmd->resp[3] = mci_readl(host, RESP0);
1175                         cmd->resp[2] = mci_readl(host, RESP1);
1176                         cmd->resp[1] = mci_readl(host, RESP2);
1177                         cmd->resp[0] = mci_readl(host, RESP3);
1178                 } else {
1179                         cmd->resp[0] = mci_readl(host, RESP0);
1180                         cmd->resp[1] = 0;
1181                         cmd->resp[2] = 0;
1182                         cmd->resp[3] = 0;
1183                 }
1184         }
1185
1186         if (status & SDMMC_INT_RTO)
1187                 cmd->error = -ETIMEDOUT;
1188         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189                 cmd->error = -EILSEQ;
1190         else if (status & SDMMC_INT_RESP_ERR)
1191                 cmd->error = -EIO;
1192         else
1193                 cmd->error = 0;
1194
1195         if (cmd->error) {
1196                 /* newer ip versions need a delay between retries */
1197                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1198                         mdelay(20);
1199         }
1200
1201         return cmd->error;
1202 }
1203
1204 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1205 {
1206         u32 status = host->data_status;
1207
1208         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1209                 if (status & SDMMC_INT_DRTO) {
1210                         data->error = -ETIMEDOUT;
1211                 } else if (status & SDMMC_INT_DCRC) {
1212                         data->error = -EILSEQ;
1213                 } else if (status & SDMMC_INT_EBE) {
1214                         if (host->dir_status ==
1215                                 DW_MCI_SEND_STATUS) {
1216                                 /*
1217                                  * No data CRC status was returned.
1218                                  * The number of bytes transferred
1219                                  * will be exaggerated in PIO mode.
1220                                  */
1221                                 data->bytes_xfered = 0;
1222                                 data->error = -ETIMEDOUT;
1223                         } else if (host->dir_status ==
1224                                         DW_MCI_RECV_STATUS) {
1225                                 data->error = -EIO;
1226                         }
1227                 } else {
1228                         /* SDMMC_INT_SBE is included */
1229                         data->error = -EIO;
1230                 }
1231
1232                 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1233
1234                 /*
1235                  * After an error, there may be data lingering
1236                  * in the FIFO
1237                  */
1238                 dw_mci_reset(host);
1239         } else {
1240                 data->bytes_xfered = data->blocks * data->blksz;
1241                 data->error = 0;
1242         }
1243
1244         return data->error;
1245 }
1246
1247 static void dw_mci_tasklet_func(unsigned long priv)
1248 {
1249         struct dw_mci *host = (struct dw_mci *)priv;
1250         struct mmc_data *data;
1251         struct mmc_command *cmd;
1252         struct mmc_request *mrq;
1253         enum dw_mci_state state;
1254         enum dw_mci_state prev_state;
1255         unsigned int err;
1256
1257         spin_lock(&host->lock);
1258
1259         state = host->state;
1260         data = host->data;
1261         mrq = host->mrq;
1262
1263         do {
1264                 prev_state = state;
1265
1266                 switch (state) {
1267                 case STATE_IDLE:
1268                         break;
1269
1270                 case STATE_SENDING_CMD:
1271                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1272                                                 &host->pending_events))
1273                                 break;
1274
1275                         cmd = host->cmd;
1276                         host->cmd = NULL;
1277                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1278                         err = dw_mci_command_complete(host, cmd);
1279                         if (cmd == mrq->sbc && !err) {
1280                                 prev_state = state = STATE_SENDING_CMD;
1281                                 __dw_mci_start_request(host, host->cur_slot,
1282                                                        mrq->cmd);
1283                                 goto unlock;
1284                         }
1285
1286                         if (cmd->data && err) {
1287                                 dw_mci_stop_dma(host);
1288                                 send_stop_abort(host, data);
1289                                 state = STATE_SENDING_STOP;
1290                                 break;
1291                         }
1292
1293                         if (!cmd->data || err) {
1294                                 dw_mci_request_end(host, mrq);
1295                                 goto unlock;
1296                         }
1297
1298                         prev_state = state = STATE_SENDING_DATA;
1299                         /* fall through */
1300
1301                 case STATE_SENDING_DATA:
1302                         /*
1303                          * We could get a data error and never a transfer
1304                          * complete so we'd better check for it here.
1305                          *
1306                          * Note that we don't really care if we also got a
1307                          * transfer complete; stopping the DMA and sending an
1308                          * abort won't hurt.
1309                          */
1310                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1311                                                &host->pending_events)) {
1312                                 dw_mci_stop_dma(host);
1313                                 send_stop_abort(host, data);
1314                                 state = STATE_DATA_ERROR;
1315                                 break;
1316                         }
1317
1318                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1319                                                 &host->pending_events))
1320                                 break;
1321
1322                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1323
1324                         /*
1325                          * Handle an EVENT_DATA_ERROR that might have shown up
1326                          * before the transfer completed.  This might not have
1327                          * been caught by the check above because the interrupt
1328                          * could have gone off between the previous check and
1329                          * the check for transfer complete.
1330                          *
1331                          * Technically this ought not be needed assuming we
1332                          * get a DATA_COMPLETE eventually (we'll notice the
1333                          * error and end the request), but it shouldn't hurt.
1334                          *
1335                          * This has the advantage of sending the stop command.
1336                          */
1337                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1338                                                &host->pending_events)) {
1339                                 dw_mci_stop_dma(host);
1340                                 send_stop_abort(host, data);
1341                                 state = STATE_DATA_ERROR;
1342                                 break;
1343                         }
1344                         prev_state = state = STATE_DATA_BUSY;
1345
1346                         /* fall through */
1347
1348                 case STATE_DATA_BUSY:
1349                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1350                                                 &host->pending_events))
1351                                 break;
1352
1353                         host->data = NULL;
1354                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1355                         err = dw_mci_data_complete(host, data);
1356
1357                         if (!err) {
1358                                 if (!data->stop || mrq->sbc) {
1359                                         if (mrq->sbc && data->stop)
1360                                                 data->stop->error = 0;
1361                                         dw_mci_request_end(host, mrq);
1362                                         goto unlock;
1363                                 }
1364
1365                                 /* stop command for open-ended transfer*/
1366                                 if (data->stop)
1367                                         send_stop_abort(host, data);
1368                         } else {
1369                                 /*
1370                                  * If we don't have a command complete now we'll
1371                                  * never get one since we just reset everything;
1372                                  * better end the request.
1373                                  *
1374                                  * If we do have a command complete we'll fall
1375                                  * through to the SENDING_STOP command and
1376                                  * everything will be peachy keen.
1377                                  */
1378                                 if (!test_bit(EVENT_CMD_COMPLETE,
1379                                               &host->pending_events)) {
1380                                         host->cmd = NULL;
1381                                         dw_mci_request_end(host, mrq);
1382                                         goto unlock;
1383                                 }
1384                         }
1385
1386                         /*
1387                          * If err has non-zero,
1388                          * stop-abort command has been already issued.
1389                          */
1390                         prev_state = state = STATE_SENDING_STOP;
1391
1392                         /* fall through */
1393
1394                 case STATE_SENDING_STOP:
1395                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1396                                                 &host->pending_events))
1397                                 break;
1398
1399                         /* CMD error in data command */
1400                         if (mrq->cmd->error && mrq->data)
1401                                 dw_mci_reset(host);
1402
1403                         host->cmd = NULL;
1404                         host->data = NULL;
1405
1406                         if (mrq->stop)
1407                                 dw_mci_command_complete(host, mrq->stop);
1408                         else
1409                                 host->cmd_status = 0;
1410
1411                         dw_mci_request_end(host, mrq);
1412                         goto unlock;
1413
1414                 case STATE_DATA_ERROR:
1415                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1416                                                 &host->pending_events))
1417                                 break;
1418
1419                         state = STATE_DATA_BUSY;
1420                         break;
1421                 }
1422         } while (state != prev_state);
1423
1424         host->state = state;
1425 unlock:
1426         spin_unlock(&host->lock);
1427
1428 }
1429
1430 /* push final bytes to part_buf, only use during push */
1431 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1432 {
1433         memcpy((void *)&host->part_buf, buf, cnt);
1434         host->part_buf_count = cnt;
1435 }
1436
1437 /* append bytes to part_buf, only use during push */
1438 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1439 {
1440         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1441         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1442         host->part_buf_count += cnt;
1443         return cnt;
1444 }
1445
1446 /* pull first bytes from part_buf, only use during pull */
1447 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1448 {
1449         cnt = min(cnt, (int)host->part_buf_count);
1450         if (cnt) {
1451                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1452                        cnt);
1453                 host->part_buf_count -= cnt;
1454                 host->part_buf_start += cnt;
1455         }
1456         return cnt;
1457 }
1458
1459 /* pull final bytes from the part_buf, assuming it's just been filled */
1460 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1461 {
1462         memcpy(buf, &host->part_buf, cnt);
1463         host->part_buf_start = cnt;
1464         host->part_buf_count = (1 << host->data_shift) - cnt;
1465 }
1466
1467 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1468 {
1469         struct mmc_data *data = host->data;
1470         int init_cnt = cnt;
1471
1472         /* try and push anything in the part_buf */
1473         if (unlikely(host->part_buf_count)) {
1474                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1475                 buf += len;
1476                 cnt -= len;
1477                 if (host->part_buf_count == 2) {
1478                         mci_writew(host, DATA(host->data_offset),
1479                                         host->part_buf16);
1480                         host->part_buf_count = 0;
1481                 }
1482         }
1483 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1484         if (unlikely((unsigned long)buf & 0x1)) {
1485                 while (cnt >= 2) {
1486                         u16 aligned_buf[64];
1487                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1488                         int items = len >> 1;
1489                         int i;
1490                         /* memcpy from input buffer into aligned buffer */
1491                         memcpy(aligned_buf, buf, len);
1492                         buf += len;
1493                         cnt -= len;
1494                         /* push data from aligned buffer into fifo */
1495                         for (i = 0; i < items; ++i)
1496                                 mci_writew(host, DATA(host->data_offset),
1497                                                 aligned_buf[i]);
1498                 }
1499         } else
1500 #endif
1501         {
1502                 u16 *pdata = buf;
1503                 for (; cnt >= 2; cnt -= 2)
1504                         mci_writew(host, DATA(host->data_offset), *pdata++);
1505                 buf = pdata;
1506         }
1507         /* put anything remaining in the part_buf */
1508         if (cnt) {
1509                 dw_mci_set_part_bytes(host, buf, cnt);
1510                 /* Push data if we have reached the expected data length */
1511                 if ((data->bytes_xfered + init_cnt) ==
1512                     (data->blksz * data->blocks))
1513                         mci_writew(host, DATA(host->data_offset),
1514                                    host->part_buf16);
1515         }
1516 }
1517
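/*
 * A note on the !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS branches above
 * and in the 32/64-bit variants below: on architectures that cannot do
 * unaligned loads and stores cheaply (or at all), the data is staged
 * through a small on-stack bounce buffer with memcpy(), so the FIFO is
 * always accessed with naturally aligned, full-width transfers.
 */
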
1518 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1519 {
1520 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1521         if (unlikely((unsigned long)buf & 0x1)) {
1522                 while (cnt >= 2) {
1523                         /* pull data from fifo into aligned buffer */
1524                         u16 aligned_buf[64];
1525                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1526                         int items = len >> 1;
1527                         int i;
1528                         for (i = 0; i < items; ++i)
1529                                 aligned_buf[i] = mci_readw(host,
1530                                                 DATA(host->data_offset));
1531                         /* memcpy from aligned buffer into output buffer */
1532                         memcpy(buf, aligned_buf, len);
1533                         buf += len;
1534                         cnt -= len;
1535                 }
1536         } else
1537 #endif
1538         {
1539                 u16 *pdata = buf;
1540                 for (; cnt >= 2; cnt -= 2)
1541                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1542                 buf = pdata;
1543         }
1544         if (cnt) {
1545                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1546                 dw_mci_pull_final_bytes(host, buf, cnt);
1547         }
1548 }
1549
1550 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1551 {
1552         struct mmc_data *data = host->data;
1553         int init_cnt = cnt;
1554
1555         /* try and push anything in the part_buf */
1556         if (unlikely(host->part_buf_count)) {
1557                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1558                 buf += len;
1559                 cnt -= len;
1560                 if (host->part_buf_count == 4) {
1561                         mci_writel(host, DATA(host->data_offset),
1562                                         host->part_buf32);
1563                         host->part_buf_count = 0;
1564                 }
1565         }
1566 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1567         if (unlikely((unsigned long)buf & 0x3)) {
1568                 while (cnt >= 4) {
1569                         u32 aligned_buf[32];
1570                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1571                         int items = len >> 2;
1572                         int i;
1573                         /* memcpy from input buffer into aligned buffer */
1574                         memcpy(aligned_buf, buf, len);
1575                         buf += len;
1576                         cnt -= len;
1577                         /* push data from aligned buffer into fifo */
1578                         for (i = 0; i < items; ++i)
1579                                 mci_writel(host, DATA(host->data_offset),
1580                                                 aligned_buf[i]);
1581                 }
1582         } else
1583 #endif
1584         {
1585                 u32 *pdata = buf;
1586                 for (; cnt >= 4; cnt -= 4)
1587                         mci_writel(host, DATA(host->data_offset), *pdata++);
1588                 buf = pdata;
1589         }
1590         /* put anything remaining in the part_buf */
1591         if (cnt) {
1592                 dw_mci_set_part_bytes(host, buf, cnt);
1593                 /* Push data if we have reached the expected data length */
1594                 if ((data->bytes_xfered + init_cnt) ==
1595                     (data->blksz * data->blocks))
1596                         mci_writel(host, DATA(host->data_offset),
1597                                    host->part_buf32);
1598         }
1599 }
1600
1601 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1602 {
1603 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1604         if (unlikely((unsigned long)buf & 0x3)) {
1605                 while (cnt >= 4) {
1606                         /* pull data from fifo into aligned buffer */
1607                         u32 aligned_buf[32];
1608                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1609                         int items = len >> 2;
1610                         int i;
1611                         for (i = 0; i < items; ++i)
1612                                 aligned_buf[i] = mci_readl(host,
1613                                                 DATA(host->data_offset));
1614                         /* memcpy from aligned buffer into output buffer */
1615                         memcpy(buf, aligned_buf, len);
1616                         buf += len;
1617                         cnt -= len;
1618                 }
1619         } else
1620 #endif
1621         {
1622                 u32 *pdata = buf;
1623                 for (; cnt >= 4; cnt -= 4)
1624                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1625                 buf = pdata;
1626         }
1627         if (cnt) {
1628                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1629                 dw_mci_pull_final_bytes(host, buf, cnt);
1630         }
1631 }
1632
1633 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1634 {
1635         struct mmc_data *data = host->data;
1636         int init_cnt = cnt;
1637
1638         /* try and push anything in the part_buf */
1639         if (unlikely(host->part_buf_count)) {
1640                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1641                 buf += len;
1642                 cnt -= len;
1643
1644                 if (host->part_buf_count == 8) {
1645                         mci_writeq(host, DATA(host->data_offset),
1646                                         host->part_buf);
1647                         host->part_buf_count = 0;
1648                 }
1649         }
1650 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1651         if (unlikely((unsigned long)buf & 0x7)) {
1652                 while (cnt >= 8) {
1653                         u64 aligned_buf[16];
1654                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1655                         int items = len >> 3;
1656                         int i;
1657                         /* memcpy from input buffer into aligned buffer */
1658                         memcpy(aligned_buf, buf, len);
1659                         buf += len;
1660                         cnt -= len;
1661                         /* push data from aligned buffer into fifo */
1662                         for (i = 0; i < items; ++i)
1663                                 mci_writeq(host, DATA(host->data_offset),
1664                                                 aligned_buf[i]);
1665                 }
1666         } else
1667 #endif
1668         {
1669                 u64 *pdata = buf;
1670                 for (; cnt >= 8; cnt -= 8)
1671                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1672                 buf = pdata;
1673         }
1674         /* put anything remaining in the part_buf */
1675         if (cnt) {
1676                 dw_mci_set_part_bytes(host, buf, cnt);
1677                 /* Push data if we have reached the expected data length */
1678                 if ((data->bytes_xfered + init_cnt) ==
1679                     (data->blksz * data->blocks))
1680                         mci_writeq(host, DATA(host->data_offset),
1681                                    host->part_buf);
1682         }
1683 }
1684
1685 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1686 {
1687 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1688         if (unlikely((unsigned long)buf & 0x7)) {
1689                 while (cnt >= 8) {
1690                         /* pull data from fifo into aligned buffer */
1691                         u64 aligned_buf[16];
1692                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1693                         int items = len >> 3;
1694                         int i;
1695                         for (i = 0; i < items; ++i)
1696                                 aligned_buf[i] = mci_readq(host,
1697                                                 DATA(host->data_offset));
1698                         /* memcpy from aligned buffer into output buffer */
1699                         memcpy(buf, aligned_buf, len);
1700                         buf += len;
1701                         cnt -= len;
1702                 }
1703         } else
1704 #endif
1705         {
1706                 u64 *pdata = buf;
1707                 for (; cnt >= 8; cnt -= 8)
1708                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1709                 buf = pdata;
1710         }
1711         if (cnt) {
1712                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1713                 dw_mci_pull_final_bytes(host, buf, cnt);
1714         }
1715 }
1716
1717 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1718 {
1719         int len;
1720
1721         /* get remaining partial bytes */
1722         len = dw_mci_pull_part_bytes(host, buf, cnt);
1723         if (unlikely(len == cnt))
1724                 return;
1725         buf += len;
1726         cnt -= len;
1727
1728         /* get the rest of the data */
1729         host->pull_data(host, buf, cnt);
1730 }
1731
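/*
 * dw_mci_pull_data() first drains any bytes left over in part_buf and
 * then hands the remainder to the width-specific helper that probe
 * selected through host->pull_data (16, 32 or 64 bit, based on the
 * HCON data-width field).
 */
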
1732 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1733 {
1734         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1735         void *buf;
1736         unsigned int offset;
1737         struct mmc_data *data = host->data;
1738         int shift = host->data_shift;
1739         u32 status;
1740         unsigned int len;
1741         unsigned int remain, fcnt;
1742
1743         do {
1744                 if (!sg_miter_next(sg_miter))
1745                         goto done;
1746
1747                 host->sg = sg_miter->piter.sg;
1748                 buf = sg_miter->addr;
1749                 remain = sg_miter->length;
1750                 offset = 0;
1751
1752                 do {
1753                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1754                                         << shift) + host->part_buf_count;
1755                         len = min(remain, fcnt);
1756                         if (!len)
1757                                 break;
1758                         dw_mci_pull_data(host, (void *)(buf + offset), len);
1759                         data->bytes_xfered += len;
1760                         offset += len;
1761                         remain -= len;
1762                 } while (remain);
1763
1764                 sg_miter->consumed = offset;
1765                 status = mci_readl(host, MINTSTS);
1766                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1767         /* if RXDR is still set, read again */
1768         } while ((status & SDMMC_INT_RXDR) ||
1769                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1770
1771         if (!remain) {
1772                 if (!sg_miter_next(sg_miter))
1773                         goto done;
1774                 sg_miter->consumed = 0;
1775         }
1776         sg_miter_stop(sg_miter);
1777         return;
1778
1779 done:
1780         sg_miter_stop(sg_miter);
1781         host->sg = NULL;
1782         smp_wmb();
1783         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1784 }
1785
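/*
 * In both PIO paths, fcnt is the number of bytes that can be moved in
 * one go: the FIFO fill level from the STATUS register converted to
 * bytes (<< shift), adjusted by any partial bytes held in part_buf.
 * For reads it is the data available; for writes (below) it is the
 * remaining FIFO space, fifo_depth minus the fill level.
 */
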
1786 static void dw_mci_write_data_pio(struct dw_mci *host)
1787 {
1788         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1789         void *buf;
1790         unsigned int offset;
1791         struct mmc_data *data = host->data;
1792         int shift = host->data_shift;
1793         u32 status;
1794         unsigned int len;
1795         unsigned int fifo_depth = host->fifo_depth;
1796         unsigned int remain, fcnt;
1797
1798         do {
1799                 if (!sg_miter_next(sg_miter))
1800                         goto done;
1801
1802                 host->sg = sg_miter->piter.sg;
1803                 buf = sg_miter->addr;
1804                 remain = sg_miter->length;
1805                 offset = 0;
1806
1807                 do {
1808                         fcnt = ((fifo_depth -
1809                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1810                                         << shift) - host->part_buf_count;
1811                         len = min(remain, fcnt);
1812                         if (!len)
1813                                 break;
1814                         host->push_data(host, (void *)(buf + offset), len);
1815                         data->bytes_xfered += len;
1816                         offset += len;
1817                         remain -= len;
1818                 } while (remain);
1819
1820                 sg_miter->consumed = offset;
1821                 status = mci_readl(host, MINTSTS);
1822                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1823         } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1824
1825         if (!remain) {
1826                 if (!sg_miter_next(sg_miter))
1827                         goto done;
1828                 sg_miter->consumed = 0;
1829         }
1830         sg_miter_stop(sg_miter);
1831         return;
1832
1833 done:
1834         sg_miter_stop(sg_miter);
1835         host->sg = NULL;
1836         smp_wmb();
1837         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1838 }
1839
1840 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1841 {
1842         if (!host->cmd_status)
1843                 host->cmd_status = status;
1844
1845         smp_wmb();
1846
1847         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848         tasklet_schedule(&host->tasklet);
1849 }
1850
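/*
 * Top-level interrupt handler.  MINTSTS holds the masked interrupt
 * status; each source that is handled is acknowledged by writing its
 * bit back to RINTSTS.  FIFO servicing happens inline, state-machine
 * work is deferred to the tasklet, and card detect is punted to the
 * card workqueue.
 */
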
1851 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1852 {
1853         struct dw_mci *host = dev_id;
1854         u32 pending;
1855         int i;
1856
1857         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1858
1859         /*
1860          * DTO fix - version 2.10a and below, and only if internal DMA
1861          * is configured.
1862          */
1863         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1864                 if (!pending &&
1865                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1866                         pending |= SDMMC_INT_DATA_OVER;
1867         }
1868
1869         if (pending) {
1870                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1871                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1872                         host->cmd_status = pending;
1873                         smp_wmb();
1874                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1875                 }
1876
1877                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1878                         /* if there is an error report DATA_ERROR */
1879                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1880                         host->data_status = pending;
1881                         smp_wmb();
1882                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
1883                         tasklet_schedule(&host->tasklet);
1884                 }
1885
1886                 if (pending & SDMMC_INT_DATA_OVER) {
1887                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1888                         if (!host->data_status)
1889                                 host->data_status = pending;
1890                         smp_wmb();
1891                         if (host->dir_status == DW_MCI_RECV_STATUS) {
1892                                 if (host->sg != NULL)
1893                                         dw_mci_read_data_pio(host, true);
1894                         }
1895                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1896                         tasklet_schedule(&host->tasklet);
1897                 }
1898
1899                 if (pending & SDMMC_INT_RXDR) {
1900                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1901                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1902                                 dw_mci_read_data_pio(host, false);
1903                 }
1904
1905                 if (pending & SDMMC_INT_TXDR) {
1906                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1907                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1908                                 dw_mci_write_data_pio(host);
1909                 }
1910
1911                 if (pending & SDMMC_INT_CMD_DONE) {
1912                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1913                         dw_mci_cmd_interrupt(host, pending);
1914                 }
1915
1916                 if (pending & SDMMC_INT_CD) {
1917                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
1918                         queue_work(host->card_workqueue, &host->card_work);
1919                 }
1920
1921                 /* Handle SDIO Interrupts */
1922                 for (i = 0; i < host->num_slots; i++) {
1923                         struct dw_mci_slot *slot = host->slot[i];
1924                         if (pending & SDMMC_INT_SDIO(i)) {
1925                                 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1926                                 mmc_signal_sdio_irq(slot->mmc);
1927                         }
1928                 }
1929
1930         }
1931
1932 #ifdef CONFIG_MMC_DW_IDMAC
1933         /* Handle DMA interrupts */
1934         pending = mci_readl(host, IDSTS);
1935         if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1936                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1937                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1938                 host->dma_ops->complete(host);
1939         }
1940 #endif
1941
1942         return IRQ_HANDLED;
1943 }
1944
1945 static void dw_mci_work_routine_card(struct work_struct *work)
1946 {
1947         struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1948         int i;
1949
1950         for (i = 0; i < host->num_slots; i++) {
1951                 struct dw_mci_slot *slot = host->slot[i];
1952                 struct mmc_host *mmc = slot->mmc;
1953                 struct mmc_request *mrq;
1954                 int present;
1955
1956                 present = dw_mci_get_cd(mmc);
1957                 while (present != slot->last_detect_state) {
1958                         dev_dbg(&slot->mmc->class_dev, "card %s\n",
1959                                 present ? "inserted" : "removed");
1960
1961                         spin_lock_bh(&host->lock);
1962
1963                         /* Card change detected */
1964                         slot->last_detect_state = present;
1965
1966                         /* Clean up queue if present */
1967                         mrq = slot->mrq;
1968                         if (mrq) {
1969                                 if (mrq == host->mrq) {
1970                                         host->data = NULL;
1971                                         host->cmd = NULL;
1972
1973                                         switch (host->state) {
1974                                         case STATE_IDLE:
1975                                                 break;
1976                                         case STATE_SENDING_CMD:
1977                                                 mrq->cmd->error = -ENOMEDIUM;
1978                                                 if (!mrq->data)
1979                                                         break;
1980                                                 /* fall through */
1981                                         case STATE_SENDING_DATA:
1982                                                 mrq->data->error = -ENOMEDIUM;
1983                                                 dw_mci_stop_dma(host);
1984                                                 break;
1985                                         case STATE_DATA_BUSY:
1986                                         case STATE_DATA_ERROR:
1987                                                 if (mrq->data->error == -EINPROGRESS)
1988                                                         mrq->data->error = -ENOMEDIUM;
1989                                                 /* fall through */
1990                                         case STATE_SENDING_STOP:
1991                                                 if (mrq->stop)
1992                                                         mrq->stop->error = -ENOMEDIUM;
1993                                                 break;
1994                                         }
1995
1996                                         dw_mci_request_end(host, mrq);
1997                                 } else {
1998                                         list_del(&slot->queue_node);
1999                                         mrq->cmd->error = -ENOMEDIUM;
2000                                         if (mrq->data)
2001                                                 mrq->data->error = -ENOMEDIUM;
2002                                         if (mrq->stop)
2003                                                 mrq->stop->error = -ENOMEDIUM;
2004
2005                                         spin_unlock(&host->lock);
2006                                         mmc_request_done(slot->mmc, mrq);
2007                                         spin_lock(&host->lock);
2008                                 }
2009                         }
2010
2011                         /* Power down slot */
2012                         if (present == 0)
2013                                 dw_mci_reset(host);
2014
2015                         spin_unlock_bh(&host->lock);
2016
2017                         present = dw_mci_get_cd(mmc);
2018                 }
2019
2020                 mmc_detect_change(slot->mmc,
2021                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2022         }
2023 }
2024
2025 #ifdef CONFIG_OF
2026 /* given a slot id, find the device node representing that slot */
2027 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2028 {
2029         struct device_node *np;
2030         const __be32 *addr;
2031         int len;
2032
2033         if (!dev || !dev->of_node)
2034                 return NULL;
2035
2036         for_each_child_of_node(dev->of_node, np) {
2037                 addr = of_get_property(np, "reg", &len);
2038                 if (!addr || (len < sizeof(int)))
2039                         continue;
2040                 if (be32_to_cpup(addr) == slot)
2041                         return np;
2042         }
2043         return NULL;
2044 }
2045
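/*
 * A hypothetical device-tree fragment matched by the lookup above; the
 * slot child node's "reg" property carries the slot id (node names and
 * values here are illustrative, not taken from a real board file):
 *
 *	mmc@12200000 {
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *		};
 *	};
 */
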
2046 static struct dw_mci_of_slot_quirks {
2047         char *quirk;
2048         int id;
2049 } of_slot_quirks[] = {
2050         {
2051                 .quirk  = "disable-wp",
2052                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2053         },
2054 };
2055
2056 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2057 {
2058         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2059         int quirks = 0;
2060         int idx;
2061
2062         /* get quirks */
2063         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2064                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2065                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2066                                         of_slot_quirks[idx].quirk);
2067                         quirks |= of_slot_quirks[idx].id;
2068                 }
2069
2070         return quirks;
2071 }
2072 #else /* CONFIG_OF */
2073 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2074 {
2075         return 0;
2076 }
2077 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2078 {
2079         return NULL;
2080 }
2081 #endif /* CONFIG_OF */
2082
2083 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2084 {
2085         struct mmc_host *mmc;
2086         struct dw_mci_slot *slot;
2087         const struct dw_mci_drv_data *drv_data = host->drv_data;
2088         int ctrl_id, ret;
2089         u32 freq[2];
2090
2091         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2092         if (!mmc)
2093                 return -ENOMEM;
2094
2095         slot = mmc_priv(mmc);
2096         slot->id = id;
2097         slot->mmc = mmc;
2098         slot->host = host;
2099         host->slot[id] = slot;
2100
2101         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2102
2103         mmc->ops = &dw_mci_ops;
2104         if (of_property_read_u32_array(host->dev->of_node,
2105                                        "clock-freq-min-max", freq, 2)) {
2106                 mmc->f_min = DW_MCI_FREQ_MIN;
2107                 mmc->f_max = DW_MCI_FREQ_MAX;
2108         } else {
2109                 mmc->f_min = freq[0];
2110                 mmc->f_max = freq[1];
2111         }
2112
2113         mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2114
2115         if (host->pdata->caps)
2116                 mmc->caps = host->pdata->caps;
2117
2118         if (host->pdata->pm_caps)
2119                 mmc->pm_caps = host->pdata->pm_caps;
2120
2121         if (host->dev->of_node) {
2122                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2123                 if (ctrl_id < 0)
2124                         ctrl_id = 0;
2125         } else {
2126                 ctrl_id = to_platform_device(host->dev)->id;
2127         }
2128         if (drv_data && drv_data->caps)
2129                 mmc->caps |= drv_data->caps[ctrl_id];
2130
2131         if (host->pdata->caps2)
2132                 mmc->caps2 = host->pdata->caps2;
2133
2134         mmc_of_parse(mmc);
2135
2136         if (host->pdata->blk_settings) {
2137                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2138                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2139                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2140                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2141                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2142         } else {
2143                 /* Useful defaults if platform data is unset. */
2144 #ifdef CONFIG_MMC_DW_IDMAC
2145                 mmc->max_segs = host->ring_size;
2146                 mmc->max_blk_size = 65536;
2147                 mmc->max_blk_count = host->ring_size;
2148                 mmc->max_seg_size = 0x1000;
2149                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2150 #else
2151                 mmc->max_segs = 64;
2152                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2153                 mmc->max_blk_count = 512;
2154                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2155                 mmc->max_seg_size = mmc->max_req_size;
2156 #endif /* CONFIG_MMC_DW_IDMAC */
2157         }
2158
2159         if (dw_mci_get_cd(mmc))
2160                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2161         else
2162                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2163
2164         ret = mmc_add_host(mmc);
2165         if (ret)
2166                 goto err_setup_bus;
2167
2168 #if defined(CONFIG_DEBUG_FS)
2169         dw_mci_init_debugfs(slot);
2170 #endif
2171
2172         /* Card initially undetected */
2173         slot->last_detect_state = 0;
2174
2175         return 0;
2176
2177 err_setup_bus:
2178         mmc_free_host(mmc);
2179         return -EINVAL;
2180 }
2181
2182 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2183 {
2184         /* Debugfs stuff is cleaned up by mmc core */
2185         mmc_remove_host(slot->mmc);
2186         slot->host->slot[id] = NULL;
2187         mmc_free_host(slot->mmc);
2188 }
2189
2190 static void dw_mci_init_dma(struct dw_mci *host)
2191 {
2192         /* Alloc memory for sg translation */
2193         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2194                                           &host->sg_dma, GFP_KERNEL);
2195         if (!host->sg_cpu) {
2196                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2197                         __func__);
2198                 goto no_dma;
2199         }
2200
2201         /* Determine which DMA interface to use */
2202 #ifdef CONFIG_MMC_DW_IDMAC
2203         host->dma_ops = &dw_mci_idmac_ops;
2204         dev_info(host->dev, "Using internal DMA controller.\n");
2205 #endif
2206
2207         if (!host->dma_ops)
2208                 goto no_dma;
2209
2210         if (host->dma_ops->init && host->dma_ops->start &&
2211             host->dma_ops->stop && host->dma_ops->cleanup) {
2212                 if (host->dma_ops->init(host)) {
2213                         dev_err(host->dev, "%s: Unable to initialize "
2214                                 "DMA Controller.\n", __func__);
2215                         goto no_dma;
2216                 }
2217         } else {
2218                 dev_err(host->dev, "DMA initialization not found.\n");
2219                 goto no_dma;
2220         }
2221
2222         host->use_dma = 1;
2223         return;
2224
2225 no_dma:
2226         dev_info(host->dev, "Using PIO mode.\n");
2227         host->use_dma = 0;
2228         return;
2229 }
2230
2231 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2232 {
2233         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2234         u32 ctrl;
2235
2236         ctrl = mci_readl(host, CTRL);
2237         ctrl |= reset;
2238         mci_writel(host, CTRL, ctrl);
2239
2240         /* wait till resets clear */
2241         do {
2242                 ctrl = mci_readl(host, CTRL);
2243                 if (!(ctrl & reset))
2244                         return true;
2245         } while (time_before(jiffies, timeout));
2246
2247         dev_err(host->dev,
2248                 "Timeout resetting block (ctrl reset %#x)\n",
2249                 ctrl & reset);
2250
2251         return false;
2252 }
2253
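/*
 * dw_mci_ctrl_reset() sets the requested reset bits in CTRL and then
 * polls (for up to 500 ms) until the hardware clears them again, which
 * is how the controller signals that the reset has completed.
 */
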
2254 static bool dw_mci_reset(struct dw_mci *host)
2255 {
2256         u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2257         bool ret = false;
2258
2259         /*
2260          * Resetting generates a block interrupt, so set the
2261          * scatter-gather pointer to NULL beforehand.
2262          */
2263         if (host->sg) {
2264                 sg_miter_stop(&host->sg_miter);
2265                 host->sg = NULL;
2266         }
2267
2268         if (host->use_dma)
2269                 flags |= SDMMC_CTRL_DMA_RESET;
2270
2271         if (dw_mci_ctrl_reset(host, flags)) {
2272                 /*
2273                  * In all cases we clear the RINTSTS register to clear any
2274                  * interrupts.
2275                  */
2276                 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2277
2278                 /* if using dma we wait for dma_req to clear */
2279                 if (host->use_dma) {
2280                         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2281                         u32 status;
2282                         do {
2283                                 status = mci_readl(host, STATUS);
2284                                 if (!(status & SDMMC_STATUS_DMA_REQ))
2285                                         break;
2286                                 cpu_relax();
2287                         } while (time_before(jiffies, timeout));
2288
2289                         if (status & SDMMC_STATUS_DMA_REQ) {
2290                                 dev_err(host->dev,
2291                                         "%s: Timeout waiting for dma_req to "
2292                                         "clear during reset\n", __func__);
2293                                 goto ciu_out;
2294                         }
2295
2296                         /* when using DMA, we next reset the FIFO again */
2297                         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2298                                 goto ciu_out;
2299                 }
2300         } else {
2301                 /* if the controller reset bit did clear, then set clock regs */
2302                 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2303                         dev_err(host->dev, "%s: fifo/dma reset bits didn't "
2304                                 "clear but ciu was reset, doing clock update\n",
2305                                 __func__);
2306                         goto ciu_out;
2307                 }
2308         }
2309
2310 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2311         /* It is also recommended that we reset and reprogram idmac */
2312         dw_mci_idmac_reset(host);
2313 #endif
2314
2315         ret = true;
2316
2317 ciu_out:
2318         /* After a CTRL reset we need the CIU to update its clock registers */
2319         mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2320
2321         return ret;
2322 }
2323
2324 #ifdef CONFIG_OF
2325 static struct dw_mci_of_quirks {
2326         char *quirk;
2327         int id;
2328 } of_quirks[] = {
2329         {
2330                 .quirk  = "broken-cd",
2331                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2332         }, {
2333                 .quirk  = "disable-wp",
2334                 .id     = DW_MCI_QUIRK_NO_WRITE_PROTECT,
2335         },
2336 };
2337
2338 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2339 {
2340         struct dw_mci_board *pdata;
2341         struct device *dev = host->dev;
2342         struct device_node *np = dev->of_node;
2343         const struct dw_mci_drv_data *drv_data = host->drv_data;
2344         int idx, ret;
2345         u32 clock_frequency;
2346
2347         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2348         if (!pdata) {
2349                 dev_err(dev, "could not allocate memory for pdata\n");
2350                 return ERR_PTR(-ENOMEM);
2351         }
2352
2353         /* find out number of slots supported */
2354         if (of_property_read_u32(dev->of_node, "num-slots",
2355                                 &pdata->num_slots)) {
2356                 dev_info(dev, "num-slots property not found, "
2357                                 "assuming 1 slot is available\n");
2358                 pdata->num_slots = 1;
2359         }
2360
2361         /* get quirks */
2362         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2363                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2364                         pdata->quirks |= of_quirks[idx].id;
2365
2366         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2367                 dev_info(dev, "fifo-depth property not found, using "
2368                                 "value of FIFOTH register as default\n");
2369
2370         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2371
2372         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2373                 pdata->bus_hz = clock_frequency;
2374
2375         if (drv_data && drv_data->parse_dt) {
2376                 ret = drv_data->parse_dt(host);
2377                 if (ret)
2378                         return ERR_PTR(ret);
2379         }
2380
2381         if (of_find_property(np, "supports-highspeed", NULL))
2382                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2383
2384         return pdata;
2385 }
2386
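/*
 * The host-level properties consumed above, shown as an illustrative
 * sketch (values are made up, not from a real board):
 *
 *	num-slots = <1>;
 *	fifo-depth = <0x80>;
 *	card-detect-delay = <200>;
 *	clock-frequency = <400000000>;
 *	broken-cd;
 *	supports-highspeed;
 */
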
2387 #else /* CONFIG_OF */
2388 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2389 {
2390         return ERR_PTR(-EINVAL);
2391 }
2392 #endif /* CONFIG_OF */
2393
2394 int dw_mci_probe(struct dw_mci *host)
2395 {
2396         const struct dw_mci_drv_data *drv_data = host->drv_data;
2397         int width, i, ret = 0;
2398         u32 fifo_size;
2399         int init_slots = 0;
2400
2401         if (!host->pdata) {
2402                 host->pdata = dw_mci_parse_dt(host);
2403                 if (IS_ERR(host->pdata)) {
2404                         dev_err(host->dev, "platform data not available\n");
2405                         return -EINVAL;
2406                 }
2407         }
2408
2409         if (host->pdata->num_slots > 1) {
2410                 dev_err(host->dev,
2411                         "Multiple slots are not supported.\n");
2412                 return -ENODEV;
2413         }
2414
2415         host->biu_clk = devm_clk_get(host->dev, "biu");
2416         if (IS_ERR(host->biu_clk)) {
2417                 dev_dbg(host->dev, "biu clock not available\n");
2418         } else {
2419                 ret = clk_prepare_enable(host->biu_clk);
2420                 if (ret) {
2421                         dev_err(host->dev, "failed to enable biu clock\n");
2422                         return ret;
2423                 }
2424         }
2425
2426         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2427         if (IS_ERR(host->ciu_clk)) {
2428                 dev_dbg(host->dev, "ciu clock not available\n");
2429                 host->bus_hz = host->pdata->bus_hz;
2430         } else {
2431                 ret = clk_prepare_enable(host->ciu_clk);
2432                 if (ret) {
2433                         dev_err(host->dev, "failed to enable ciu clock\n");
2434                         goto err_clk_biu;
2435                 }
2436
2437                 if (host->pdata->bus_hz) {
2438                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2439                         if (ret)
2440                                 dev_warn(host->dev,
2441                                          "Unable to set bus rate to %uHz\n",
2442                                          host->pdata->bus_hz);
2443                 }
2444                 host->bus_hz = clk_get_rate(host->ciu_clk);
2445         }
2446
2447         if (!host->bus_hz) {
2448                 dev_err(host->dev,
2449                         "Platform data must supply bus speed\n");
2450                 ret = -ENODEV;
2451                 goto err_clk_ciu;
2452         }
2453
2454         if (drv_data && drv_data->init) {
2455                 ret = drv_data->init(host);
2456                 if (ret) {
2457                         dev_err(host->dev,
2458                                 "implementation specific init failed\n");
2459                         goto err_clk_ciu;
2460                 }
2461         }
2462
2463         if (drv_data && drv_data->setup_clock) {
2464                 ret = drv_data->setup_clock(host);
2465                 if (ret) {
2466                         dev_err(host->dev,
2467                                 "implementation specific clock setup failed\n");
2468                         goto err_clk_ciu;
2469                 }
2470         }
2471
2472         host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2473         if (IS_ERR(host->vmmc)) {
2474                 ret = PTR_ERR(host->vmmc);
2475                 if (ret == -EPROBE_DEFER)
2476                         goto err_clk_ciu;
2477
2478                 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2479                 host->vmmc = NULL;
2480         } else {
2481                 ret = regulator_enable(host->vmmc);
2482                 if (ret) {
2483                         if (ret != -EPROBE_DEFER)
2484                                 dev_err(host->dev,
2485                                         "regulator_enable fail: %d\n", ret);
2486                         goto err_clk_ciu;
2487                 }
2488         }
2489
2490         host->quirks = host->pdata->quirks;
2491
2492         spin_lock_init(&host->lock);
2493         INIT_LIST_HEAD(&host->queue);
2494
2495         /*
2496          * Get the host data width - this assumes that HCON has been set with
2497          * the correct values.
2498          */
2499         i = (mci_readl(host, HCON) >> 7) & 0x7;
2500         if (!i) {
2501                 host->push_data = dw_mci_push_data16;
2502                 host->pull_data = dw_mci_pull_data16;
2503                 width = 16;
2504                 host->data_shift = 1;
2505         } else if (i == 2) {
2506                 host->push_data = dw_mci_push_data64;
2507                 host->pull_data = dw_mci_pull_data64;
2508                 width = 64;
2509                 host->data_shift = 3;
2510         } else {
2511                 /* Check for a reserved value, and warn if it is */
2512                 WARN((i != 1),
2513                      "HCON reports a reserved host data width!\n"
2514                      "Defaulting to 32-bit access.\n");
2515                 host->push_data = dw_mci_push_data32;
2516                 host->pull_data = dw_mci_pull_data32;
2517                 width = 32;
2518                 host->data_shift = 2;
2519         }
2520
2521         /* Reset all blocks */
2522         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2523                 return -ENODEV;
2524
2525         host->dma_ops = host->pdata->dma_ops;
2526         dw_mci_init_dma(host);
2527
2528         /* Clear the interrupts for the host controller */
2529         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2530         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2531
2532         /* Put in max timeout */
2533         mci_writel(host, TMOUT, 0xFFFFFFFF);
2534
2535         /*
2536          * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2537          * TX mark = fifo_size / 2, DMA size = 8.
2538          */
2539         if (!host->pdata->fifo_depth) {
2540                 /*
2541                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2542                  * have been overwritten by the bootloader, just like we're
2543                  * about to do, so if you know the value for your hardware, you
2544                  * should put it in the platform data.
2545                  */
2546                 fifo_size = mci_readl(host, FIFOTH);
2547                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2548         } else {
2549                 fifo_size = host->pdata->fifo_depth;
2550         }
2551         host->fifo_depth = fifo_size;
2552         host->fifoth_val =
2553                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2554         mci_writel(host, FIFOTH, host->fifoth_val);
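
        /*
         * A worked example (values purely illustrative): with
         * fifo_size = 32, the write above programs MSIZE = 2 (a DMA
         * multiple-transaction size of 8), RX_WMark = 15 and
         * TX_WMark = 16, i.e. FIFOTH = 0x200f0010.
         */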
2555
2556         /* disable clock to CIU */
2557         mci_writel(host, CLKENA, 0);
2558         mci_writel(host, CLKSRC, 0);
2559
2560         /*
2561          * The DATA register offset changed in the 2.40a spec, so check
2562          * the version ID and set the data offset accordingly.
2563          */
2564         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2565         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2566
2567         if (host->verid < DW_MMC_240A)
2568                 host->data_offset = DATA_OFFSET;
2569         else
2570                 host->data_offset = DATA_240A_OFFSET;
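
        /*
         * Concretely: controllers older than 2.40a map the data FIFO at
         * DATA_OFFSET (0x100 in this driver's header), while 2.40a and
         * newer moved it to DATA_240A_OFFSET (0x200), which is why every
         * FIFO access in this file goes through DATA(host->data_offset).
         */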
2571
2572         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2573         host->card_workqueue = alloc_workqueue("dw-mci-card",
2574                         WQ_MEM_RECLAIM, 1);
2575         if (!host->card_workqueue) {
2576                 ret = -ENOMEM;
2577                 goto err_dmaunmap;
2578         }
2579         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2580         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2581                                host->irq_flags, "dw-mci", host);
2582         if (ret)
2583                 goto err_workqueue;
2584
2585         if (host->pdata->num_slots)
2586                 host->num_slots = host->pdata->num_slots;
2587         else
2588                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2589
2590         /*
2591          * Enable interrupts for command done, data over, data empty, card det,
2592          * receive ready and error such as transmit, receive timeout, crc error
2593          */
2594         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2595         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2596                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2597                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2598         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2599
2600         dev_info(host->dev, "DW MMC controller at irq %d, "
2601                  "%d bit host data width, "
2602                  "%u deep fifo\n",
2603                  host->irq, width, fifo_size);
2604
2605         /* We need at least one slot to succeed */
2606         for (i = 0; i < host->num_slots; i++) {
2607                 ret = dw_mci_init_slot(host, i);
2608                 if (ret)
2609                         dev_dbg(host->dev, "slot %d init failed\n", i);
2610                 else
2611                         init_slots++;
2612         }
2613
2614         if (init_slots) {
2615                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2616         } else {
2617                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2618                                         "but failed on all\n", host->num_slots);
2619                 goto err_workqueue;
2620         }
2621
2622         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2623                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2624
2625         return 0;
2626
2627 err_workqueue:
2628         destroy_workqueue(host->card_workqueue);
2629
2630 err_dmaunmap:
2631         if (host->use_dma && host->dma_ops->exit)
2632                 host->dma_ops->exit(host);
2633         if (host->vmmc)
2634                 regulator_disable(host->vmmc);
2635
2636 err_clk_ciu:
2637         if (!IS_ERR(host->ciu_clk))
2638                 clk_disable_unprepare(host->ciu_clk);
2639
2640 err_clk_biu:
2641         if (!IS_ERR(host->biu_clk))
2642                 clk_disable_unprepare(host->biu_clk);
2643
2644         return ret;
2645 }
2646 EXPORT_SYMBOL(dw_mci_probe);
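
/*
 * dw_mci_probe() is intended to be called from a bus-glue driver that
 * has already filled in host->dev, host->regs, host->irq and friends.
 * A minimal, hypothetical platform-glue sketch (error handling trimmed;
 * the function and symbol names are illustrative, the real upstream
 * glue lives in dw_mmc-pltfm.c):
 *
 *	static int my_dw_mci_pltfm_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		host->irq = platform_get_irq(pdev, 0);
 *		if (host->irq < 0)
 *			return host->irq;
 *
 *		host->dev = &pdev->dev;
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *
 *		platform_set_drvdata(pdev, host);
 *		return dw_mci_probe(host);
 *	}
 */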
2647
2648 void dw_mci_remove(struct dw_mci *host)
2649 {
2650         int i;
2651
2652         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2653         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2654
2655         for (i = 0; i < host->num_slots; i++) {
2656                 dev_dbg(host->dev, "remove slot %d\n", i);
2657                 if (host->slot[i])
2658                         dw_mci_cleanup_slot(host->slot[i], i);
2659         }
2660
2661         /* disable clock to CIU */
2662         mci_writel(host, CLKENA, 0);
2663         mci_writel(host, CLKSRC, 0);
2664
2665         destroy_workqueue(host->card_workqueue);
2666
2667         if (host->use_dma && host->dma_ops->exit)
2668                 host->dma_ops->exit(host);
2669
2670         if (host->vmmc)
2671                 regulator_disable(host->vmmc);
2672
2673         if (!IS_ERR(host->ciu_clk))
2674                 clk_disable_unprepare(host->ciu_clk);
2675
2676         if (!IS_ERR(host->biu_clk))
2677                 clk_disable_unprepare(host->biu_clk);
2678 }
2679 EXPORT_SYMBOL(dw_mci_remove);
2680
2681
2682
2683 #ifdef CONFIG_PM_SLEEP
2684 /*
2685  * TODO: we should probably disable the clock to the card in the suspend path.
2686  */
2687 int dw_mci_suspend(struct dw_mci *host)
2688 {
2689         if (host->vmmc)
2690                 regulator_disable(host->vmmc);
2691
2692         return 0;
2693 }
2694 EXPORT_SYMBOL(dw_mci_suspend);
2695
2696 int dw_mci_resume(struct dw_mci *host)
2697 {
2698         int i, ret;
2699
2700         if (host->vmmc) {
2701                 ret = regulator_enable(host->vmmc);
2702                 if (ret) {
2703                         dev_err(host->dev,
2704                                 "failed to enable regulator: %d\n", ret);
2705                         return ret;
2706                 }
2707         }
2708
2709         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2710                 ret = -ENODEV;
2711                 return ret;
2712         }
2713
2714         if (host->use_dma && host->dma_ops->init)
2715                 host->dma_ops->init(host);
2716
2717         /*
2718          * Restore the initial value of the FIFOTH register,
2719          * and invalidate prev_blksz by resetting it to zero.
2720          */
2721         mci_writel(host, FIFOTH, host->fifoth_val);
2722         host->prev_blksz = 0;
2723
2724         /* Put in max timeout */
2725         mci_writel(host, TMOUT, 0xFFFFFFFF);
2726
2727         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2728         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2729                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2730                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2731         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2732
2733         for (i = 0; i < host->num_slots; i++) {
2734                 struct dw_mci_slot *slot = host->slot[i];
2735                 if (!slot)
2736                         continue;
2737                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2738                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2739                         dw_mci_setup_bus(slot, true);
2740                 }
2741         }
2742         return 0;
2743 }
2744 EXPORT_SYMBOL(dw_mci_resume);
2745 #endif /* CONFIG_PM_SLEEP */
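
/*
 * A hedged sketch of how a glue driver might wire these helpers into
 * dev_pm_ops (wrapper and symbol names are illustrative):
 *
 *	static int my_dw_mci_pltfm_suspend(struct device *dev)
 *	{
 *		struct dw_mci *host = dev_get_drvdata(dev);
 *
 *		return dw_mci_suspend(host);
 *	}
 *
 *	static int my_dw_mci_pltfm_resume(struct device *dev)
 *	{
 *		struct dw_mci *host = dev_get_drvdata(dev);
 *
 *		return dw_mci_resume(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_dw_mci_pltfm_pmops,
 *				 my_dw_mci_pltfm_suspend,
 *				 my_dw_mci_pltfm_resume);
 */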
2746
2747 static int __init dw_mci_init(void)
2748 {
2749         pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2750         return 0;
2751 }
2752
2753 static void __exit dw_mci_exit(void)
2754 {
2755 }
2756
2757 module_init(dw_mci_init);
2758 module_exit(dw_mci_exit);
2759
2760 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2761 MODULE_AUTHOR("NXP Semiconductor VietNam");
2762 MODULE_AUTHOR("Imagination Technologies Ltd");
2763 MODULE_LICENSE("GPL v2");