Merge branch 'slab/next' into slab/for-linus
[cascardo/linux.git] / drivers / mmc / host / dw_mmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/dw_mmc.h>
33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h>
35 #include <linux/workqueue.h>
36 #include <linux/of.h>
37 #include <linux/of_gpio.h>
38
39 #include "dw_mmc.h"
40
41 /* Common flag combinations */
42 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
43                                  SDMMC_INT_HTO | SDMMC_INT_SBE  | \
44                                  SDMMC_INT_EBE)
45 #define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
46                                  SDMMC_INT_RESP_ERR)
47 #define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
48                                  DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
49 #define DW_MCI_SEND_STATUS      1
50 #define DW_MCI_RECV_STATUS      2
51 #define DW_MCI_DMA_THRESHOLD    16
52
53 #ifdef CONFIG_MMC_DW_IDMAC
/* Descriptor layout for the 32-bit internal DMA controller (IDMAC). */
struct idmac_desc {
        u32             des0;   /* Control Descriptor */
/* Suppress the per-descriptor completion interrupt */
#define IDMAC_DES0_DIC  BIT(1)
/* Last descriptor of the transfer */
#define IDMAC_DES0_LD   BIT(2)
/* First descriptor of the transfer */
#define IDMAC_DES0_FD   BIT(3)
/* Chained mode: des3 points at the next descriptor, not a 2nd buffer */
#define IDMAC_DES0_CH   BIT(4)
/* End of ring: engine wraps back to the descriptor base address */
#define IDMAC_DES0_ER   BIT(5)
/* Card error summary -- not consulted by this driver */
#define IDMAC_DES0_CES  BIT(30)
/* Descriptor is owned by the DMA engine, not the CPU */
#define IDMAC_DES0_OWN  BIT(31)

        u32             des1;   /* Buffer sizes */
/* Buffer 1 size occupies bits [12:0]; buffer 2 bits [25:13] are preserved */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
        ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des2;   /* buffer 1 physical address */

        u32             des3;   /* buffer 2 physical address */
};
72 #endif /* CONFIG_MMC_DW_IDMAC */
73
/**
 * struct dw_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
 * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
 * @ctype: Card type for this slot (SDMMC_CTYPE_* bus-width encoding).
 * @mrq: mmc_request currently being processed or waiting to be
 *      processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *      &struct dw_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot (DW_MMC_CARD_*
 *      bit numbers below, manipulated with the atomic bitops).
 * @id: Number of this slot.
 * @last_detect_state: Most recently observed card detect state.
 */
struct dw_mci_slot {
        struct mmc_host         *mmc;
        struct dw_mci           *host;

        int                     quirks;
        int                     wp_gpio;

        u32                     ctype;

        struct mmc_request      *mrq;
        struct list_head        queue_node;

        unsigned int            clock;
        unsigned long           flags;
/* Bit numbers for @flags, used with {test,set,clear}_bit() */
#define DW_MMC_CARD_PRESENT     0
#define DW_MMC_CARD_NEED_INIT   1
        int                     id;
        int                     last_detect_state;
};
109
110 #if defined(CONFIG_DEBUG_FS)
111 static int dw_mci_req_show(struct seq_file *s, void *v)
112 {
113         struct dw_mci_slot *slot = s->private;
114         struct mmc_request *mrq;
115         struct mmc_command *cmd;
116         struct mmc_command *stop;
117         struct mmc_data *data;
118
119         /* Make sure we get a consistent snapshot */
120         spin_lock_bh(&slot->host->lock);
121         mrq = slot->mrq;
122
123         if (mrq) {
124                 cmd = mrq->cmd;
125                 data = mrq->data;
126                 stop = mrq->stop;
127
128                 if (cmd)
129                         seq_printf(s,
130                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
131                                    cmd->opcode, cmd->arg, cmd->flags,
132                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
133                                    cmd->resp[2], cmd->error);
134                 if (data)
135                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
136                                    data->bytes_xfered, data->blocks,
137                                    data->blksz, data->flags, data->error);
138                 if (stop)
139                         seq_printf(s,
140                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
141                                    stop->opcode, stop->arg, stop->flags,
142                                    stop->resp[0], stop->resp[1], stop->resp[2],
143                                    stop->resp[2], stop->error);
144         }
145
146         spin_unlock_bh(&slot->host->lock);
147
148         return 0;
149 }
150
151 static int dw_mci_req_open(struct inode *inode, struct file *file)
152 {
153         return single_open(file, dw_mci_req_show, inode->i_private);
154 }
155
/* File operations for the per-slot debugfs "req" dump. */
static const struct file_operations dw_mci_req_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_req_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
163
164 static int dw_mci_regs_show(struct seq_file *s, void *v)
165 {
166         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
167         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
168         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
169         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
170         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
171         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
172
173         return 0;
174 }
175
176 static int dw_mci_regs_open(struct inode *inode, struct file *file)
177 {
178         return single_open(file, dw_mci_regs_show, inode->i_private);
179 }
180
/* File operations for the host-wide debugfs "regs" dump. */
static const struct file_operations dw_mci_regs_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
188
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs
 * root.  Best-effort: on any failure we log once and leave whatever
 * entries were already created in place.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
        struct mmc_host *mmc = slot->mmc;
        struct dw_mci *host = slot->host;
        struct dentry *root;
        struct dentry *node;

        root = mmc->debugfs_root;
        if (!root)
                return;

        /* Raw controller registers (host-wide) */
        node = debugfs_create_file("regs", S_IRUSR, root, host,
                                   &dw_mci_regs_fops);
        if (!node)
                goto err;

        /* The request currently owned by this slot, if any */
        node = debugfs_create_file("req", S_IRUSR, root, slot,
                                   &dw_mci_req_fops);
        if (!node)
                goto err;

        /* State machine and event bookkeeping, read-only */
        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
        if (!node)
                goto err;

        node = debugfs_create_x32("pending_events", S_IRUSR, root,
                                  (u32 *)&host->pending_events);
        if (!node)
                goto err;

        node = debugfs_create_x32("completed_events", S_IRUSR, root,
                                  (u32 *)&host->completed_events);
        if (!node)
                goto err;

        return;

err:
        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
229 #endif /* defined(CONFIG_DEBUG_FS) */
230
/* Program the largest possible hardware data/response timeout. */
static void dw_mci_set_timeout(struct dw_mci *host)
{
        /* timeout (maximum) */
        mci_writel(host, TMOUT, 0xffffffff);
}
236
237 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
238 {
239         struct mmc_data *data;
240         struct dw_mci_slot *slot = mmc_priv(mmc);
241         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
242         u32 cmdr;
243         cmd->error = -EINPROGRESS;
244
245         cmdr = cmd->opcode;
246
247         if (cmdr == MMC_STOP_TRANSMISSION)
248                 cmdr |= SDMMC_CMD_STOP;
249         else
250                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
251
252         if (cmd->flags & MMC_RSP_PRESENT) {
253                 /* We expect a response, so set this bit */
254                 cmdr |= SDMMC_CMD_RESP_EXP;
255                 if (cmd->flags & MMC_RSP_136)
256                         cmdr |= SDMMC_CMD_RESP_LONG;
257         }
258
259         if (cmd->flags & MMC_RSP_CRC)
260                 cmdr |= SDMMC_CMD_RESP_CRC;
261
262         data = cmd->data;
263         if (data) {
264                 cmdr |= SDMMC_CMD_DAT_EXP;
265                 if (data->flags & MMC_DATA_STREAM)
266                         cmdr |= SDMMC_CMD_STRM_MODE;
267                 if (data->flags & MMC_DATA_WRITE)
268                         cmdr |= SDMMC_CMD_DAT_WR;
269         }
270
271         if (drv_data && drv_data->prepare_command)
272                 drv_data->prepare_command(slot->host, &cmdr);
273
274         return cmdr;
275 }
276
/*
 * Latch @cmd as the in-flight command and kick the CIU.  The wmb()
 * orders the CMDARG write ahead of the CMD write that sets the start
 * bit, so the controller never sees a stale argument.
 */
static void dw_mci_start_command(struct dw_mci *host,
                                 struct mmc_command *cmd, u32 cmd_flags)
{
        host->cmd = cmd;
        dev_vdbg(host->dev,
                 "start command: ARGR=0x%08x CMDR=0x%08x\n",
                 cmd->arg, cmd_flags);

        mci_writel(host, CMDARG, cmd->arg);
        wmb();  /* argument must land before the start bit */

        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
290
/* Issue the stop command pre-computed in host->stop_cmdr for @data. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
        dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
295
/* DMA interface functions */

/*
 * Abort an in-flight data transfer.  In PIO mode there is nothing to
 * tear down, so just mark the transfer phase as complete.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
        if (host->using_dma) {
                host->dma_ops->stop(host);
                host->dma_ops->cleanup(host);
        } else {
                /* Data transfer was stopped by the interrupt handler */
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        }
}
307
308 static int dw_mci_get_dma_dir(struct mmc_data *data)
309 {
310         if (data->flags & MMC_DATA_WRITE)
311                 return DMA_TO_DEVICE;
312         else
313                 return DMA_FROM_DEVICE;
314 }
315
316 #ifdef CONFIG_MMC_DW_IDMAC
317 static void dw_mci_dma_cleanup(struct dw_mci *host)
318 {
319         struct mmc_data *data = host->data;
320
321         if (data)
322                 if (!data->host_cookie)
323                         dma_unmap_sg(host->dev,
324                                      data->sg,
325                                      data->sg_len,
326                                      dw_mci_get_dma_dir(data));
327 }
328
/*
 * Quiesce the internal DMAC: detach it from the data path and reset it
 * via CTRL, then stop the descriptor engine itself via BMOD.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
        u32 temp;

        /* Disable and reset the IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp &= ~SDMMC_CTRL_USE_IDMAC;
        temp |= SDMMC_CTRL_DMA_RESET;
        mci_writel(host, CTRL, temp);

        /* Stop the IDMAC running */
        temp = mci_readl(host, BMOD);
        temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
        mci_writel(host, BMOD, temp);
}
344
/*
 * IDMAC transfer-complete handler: unmap buffers, then hand the
 * transfer-complete event to the state-machine tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        dev_vdbg(host->dev, "DMA complete\n");

        host->dma_ops->cleanup(host);

        /*
         * If the card was removed, data will be NULL. No point in trying to
         * send the stop command or waiting for NBUSY in this case.
         */
        if (data) {
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
                tasklet_schedule(&host->tasklet);
        }
}
362
/*
 * Fill the IDMAC descriptor ring for one DMA-mapped scatterlist.
 *
 * Every entry is handed to the engine (OWN) with its completion
 * interrupt suppressed (DIC) and chained mode enabled (CH); the first
 * and last entries are then patched with FD/LD, and LD clears CH|DIC
 * so the engine stops and interrupts only at the end of the list.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
                                    unsigned int sg_len)
{
        int i;
        struct idmac_desc *desc = host->sg_cpu;

        for (i = 0; i < sg_len; i++, desc++) {
                unsigned int length = sg_dma_len(&data->sg[i]);
                u32 mem_addr = sg_dma_address(&data->sg[i]);

                /* Set the OWN bit and disable interrupts for this descriptor */
                desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

                /* Buffer length */
                IDMAC_SET_BUFFER1_SIZE(desc, length);

                /* Physical address to DMA to/from */
                desc->des2 = mem_addr;
        }

        /* Set first descriptor */
        desc = host->sg_cpu;
        desc->des0 |= IDMAC_DES0_FD;

        /*
         * Set last descriptor.
         * NOTE(review): this assumes host->sg_cpu is a void * (byte
         * arithmetic), so the explicit sizeof() scaling lands on entry
         * i-1 -- verify against the sg_cpu declaration in dw_mmc.h.
         */
        desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
        desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
        desc->des0 |= IDMAC_DES0_LD;

        /* descriptors must be visible in memory before the engine starts */
        wmb();
}
394
/*
 * Program the descriptor ring for the current transfer and start the
 * internal DMAC.  The PLDMND (poll demand) write wakes the engine so
 * it fetches the first descriptor.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
        u32 temp;

        dw_mci_translate_sglist(host, host->data, sg_len);

        /* Select IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_USE_IDMAC;
        mci_writel(host, CTRL, temp);

        /* ensure CTRL update is ordered before enabling the engine */
        wmb();

        /* Enable the IDMAC */
        temp = mci_readl(host, BMOD);
        temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
        mci_writel(host, BMOD, temp);

        /* Start it running */
        mci_writel(host, PLDMND, 1);
}
416
/*
 * Build the static IDMAC descriptor ring in host->sg_cpu: each entry's
 * des3 is forward-chained to the DMA address of the next entry, the
 * final entry wraps to the base and carries the end-of-ring bit.  Then
 * reset the engine, unmask only the completion interrupts, and point
 * the hardware at the ring.  Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
        struct idmac_desc *p;
        int i;

        /* Number of descriptors in the ring buffer */
        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

        /* Forward link the descriptor list */
        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
                p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des3 = host->sg_dma;
        p->des0 = IDMAC_DES0_ER;

        mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
                   SDMMC_IDMAC_INT_TI);

        /* Set the descriptor base address */
        mci_writel(host, DBADDR, host->sg_dma);
        return 0;
}
443
/* DMA-ops vtable for the internal DMA controller backend. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
        .complete = dw_mci_idmac_complete_dma,
        .cleanup = dw_mci_dma_cleanup,
};
451 #endif /* CONFIG_MMC_DW_IDMAC */
452
453 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
454                                    struct mmc_data *data,
455                                    bool next)
456 {
457         struct scatterlist *sg;
458         unsigned int i, sg_len;
459
460         if (!next && data->host_cookie)
461                 return data->host_cookie;
462
463         /*
464          * We don't do DMA on "complex" transfers, i.e. with
465          * non-word-aligned buffers or lengths. Also, we don't bother
466          * with all the DMA setup overhead for short transfers.
467          */
468         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
469                 return -EINVAL;
470
471         if (data->blksz & 3)
472                 return -EINVAL;
473
474         for_each_sg(data->sg, sg, data->sg_len, i) {
475                 if (sg->offset & 3 || sg->length & 3)
476                         return -EINVAL;
477         }
478
479         sg_len = dma_map_sg(host->dev,
480                             data->sg,
481                             data->sg_len,
482                             dw_mci_get_dma_dir(data));
483         if (sg_len == 0)
484                 return -EINVAL;
485
486         if (next)
487                 data->host_cookie = sg_len;
488
489         return sg_len;
490 }
491
492 static void dw_mci_pre_req(struct mmc_host *mmc,
493                            struct mmc_request *mrq,
494                            bool is_first_req)
495 {
496         struct dw_mci_slot *slot = mmc_priv(mmc);
497         struct mmc_data *data = mrq->data;
498
499         if (!slot->host->use_dma || !data)
500                 return;
501
502         if (data->host_cookie) {
503                 data->host_cookie = 0;
504                 return;
505         }
506
507         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
508                 data->host_cookie = 0;
509 }
510
511 static void dw_mci_post_req(struct mmc_host *mmc,
512                             struct mmc_request *mrq,
513                             int err)
514 {
515         struct dw_mci_slot *slot = mmc_priv(mmc);
516         struct mmc_data *data = mrq->data;
517
518         if (!slot->host->use_dma || !data)
519                 return;
520
521         if (data->host_cookie)
522                 dma_unmap_sg(slot->host->dev,
523                              data->sg,
524                              data->sg_len,
525                              dw_mci_get_dma_dir(data));
526         data->host_cookie = 0;
527 }
528
/*
 * Try to start @data as a DMA transfer.  Returns 0 on success, or a
 * negative error when the caller must fall back to PIO (no DMA
 * channel, or the scatterlist failed the alignment/size checks).
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
        int sg_len;
        u32 temp;

        host->using_dma = 0;

        /* If we don't have a channel, we can't do DMA */
        if (!host->use_dma)
                return -ENODEV;

        sg_len = dw_mci_pre_dma_transfer(host, data, 0);
        if (sg_len < 0) {
                host->dma_ops->stop(host);
                return sg_len;
        }

        host->using_dma = 1;

        dev_vdbg(host->dev,
                 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
                 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
                 sg_len);

        /* Enable the DMA interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_DMA_ENABLE;
        mci_writel(host, CTRL, temp);

        /* Disable RX/TX IRQs, let DMA handle it */
        temp = mci_readl(host, INTMASK);
        temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
        mci_writel(host, INTMASK, temp);

        host->dma_ops->start(host, sg_len);

        return 0;
}
567
/*
 * Prepare @data for transfer: try DMA first, and on failure fall back
 * to PIO by starting a sg_miter walk and re-enabling the RX/TX FIFO
 * interrupts that the DMA path keeps masked.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
        u32 temp;

        data->error = -EINPROGRESS;

        WARN_ON(host->data);
        host->sg = NULL;
        host->data = data;

        if (data->flags & MMC_DATA_READ)
                host->dir_status = DW_MCI_RECV_STATUS;
        else
                host->dir_status = DW_MCI_SEND_STATUS;

        if (dw_mci_submit_data_dma(host, data)) {
                /* PIO fallback: iterate the scatterlist from IRQ context */
                int flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;

                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->sg = data->sg;
                host->part_buf_start = 0;
                host->part_buf_count = 0;

                /* clear stale FIFO events, then unmask RX/TX interrupts */
                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
                temp = mci_readl(host, INTMASK);
                temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
                mci_writel(host, INTMASK, temp);

                /* make sure the controller is not in DMA mode */
                temp = mci_readl(host, CTRL);
                temp &= ~SDMMC_CTRL_DMA_ENABLE;
                mci_writel(host, CTRL, temp);
        }
}
605
/*
 * Synchronously issue a CIU housekeeping command (e.g. clock update)
 * and busy-wait up to 500ms for the controller to clear the start
 * bit.  On timeout we can only log: there is no recovery here short
 * of a controller reset.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
        struct dw_mci *host = slot->host;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
        unsigned int cmd_status = 0;

        mci_writel(host, CMDARG, arg);
        wmb();  /* argument before start bit */
        mci_writel(host, CMD, SDMMC_CMD_START | cmd);

        while (time_before(jiffies, timeout)) {
                cmd_status = mci_readl(host, CMD);
                if (!(cmd_status & SDMMC_CMD_START))
                        return;
        }
        dev_err(&slot->mmc->class_dev,
                "Timeout sending command (cmd %#x arg %#x status %#x)\n",
                cmd, arg, cmd_status);
}
625
/*
 * Apply the slot's clock rate and bus width to the controller.
 *
 * The clock is only reprogrammed when it differs from the cached
 * current_speed (or when @force_clkinit is set); each CLKENA/CLKDIV
 * change must be followed by an SDMMC_CMD_UPD_CLK command so the CIU
 * latches the new values.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
        struct dw_mci *host = slot->host;
        u32 div;
        u32 clk_en_a;

        if (slot->clock != host->current_speed || force_clkinit) {
                /* NOTE(review): assumes slot->clock != 0 whenever it differs
                 * from current_speed; a zero clock here divides by zero. */
                div = host->bus_hz / slot->clock;
                if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
                        div += 1;

                /* CLKDIV divides by 2*div; 0 means bypass (full bus_hz) */
                div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;

                dev_info(&slot->mmc->class_dev,
                         "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
                         " div = %d)\n", slot->id, host->bus_hz, slot->clock,
                         div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

                /* disable clock */
                mci_writel(host, CLKENA, 0);
                mci_writel(host, CLKSRC, 0);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                /* set clock to desired speed */
                mci_writel(host, CLKDIV, div);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                /* enable clock; only low power if no SDIO */
                clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
                if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
                        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
                mci_writel(host, CLKENA, clk_en_a);

                /* inform CIU */
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

                host->current_speed = slot->clock;
        }

        /* Set the current slot bus width */
        mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
679
/*
 * Start @cmd on @slot: reset per-request bookkeeping, program the data
 * phase (timeout, byte/block counts) when present, and fire the
 * command.  Must only be called when the controller is idle; the
 * caller holds host->lock.
 */
static void __dw_mci_start_request(struct dw_mci *host,
                                   struct dw_mci_slot *slot,
                                   struct mmc_command *cmd)
{
        struct mmc_request *mrq;
        struct mmc_data *data;
        u32 cmdflags;

        mrq = slot->mrq;
        if (host->pdata->select_slot)
                host->pdata->select_slot(slot->id);

        host->cur_slot = slot;
        host->mrq = mrq;

        host->pending_events = 0;
        host->completed_events = 0;
        host->data_status = 0;

        data = cmd->data;
        if (data) {
                dw_mci_set_timeout(host);
                mci_writel(host, BYTCNT, data->blksz*data->blocks);
                mci_writel(host, BLKSIZ, data->blksz);
        }

        cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

        /* this is the first command, send the initialization clock */
        if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
                cmdflags |= SDMMC_CMD_INIT;

        if (data) {
                dw_mci_submit_data(host, data);
                wmb();  /* data setup must be visible before the command */
        }

        dw_mci_start_command(host, cmd, cmdflags);

        /* pre-compute the stop command so the IRQ path can issue it fast */
        if (mrq->stop)
                host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
722
723 static void dw_mci_start_request(struct dw_mci *host,
724                                  struct dw_mci_slot *slot)
725 {
726         struct mmc_request *mrq = slot->mrq;
727         struct mmc_command *cmd;
728
729         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
730         __dw_mci_start_request(host, slot, cmd);
731 }
732
733 /* must be called with host->lock held */
734 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
735                                  struct mmc_request *mrq)
736 {
737         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
738                  host->state);
739
740         slot->mrq = mrq;
741
742         if (host->state == STATE_IDLE) {
743                 host->state = STATE_SENDING_CMD;
744                 dw_mci_start_request(host, slot);
745         } else {
746                 list_add_tail(&slot->queue_node, &host->queue);
747         }
748 }
749
/*
 * mmc_host_ops.request: fail fast with -ENOMEDIUM when no card is
 * present, otherwise queue (or immediately start) the request.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        WARN_ON(slot->mrq);

        /*
         * The check for card presence and queueing of the request must be
         * atomic, otherwise the card could be removed in between and the
         * request wouldn't fail until another card was inserted.
         */
        spin_lock_bh(&host->lock);

        if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
                spin_unlock_bh(&host->lock);
                mrq->cmd->error = -ENOMEDIUM;
                mmc_request_done(mmc, mrq);
                return;
        }

        dw_mci_queue_request(host, slot, mrq);

        spin_unlock_bh(&host->lock);
}
775
776 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
777 {
778         struct dw_mci_slot *slot = mmc_priv(mmc);
779         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
780         u32 regs;
781
782         switch (ios->bus_width) {
783         case MMC_BUS_WIDTH_4:
784                 slot->ctype = SDMMC_CTYPE_4BIT;
785                 break;
786         case MMC_BUS_WIDTH_8:
787                 slot->ctype = SDMMC_CTYPE_8BIT;
788                 break;
789         default:
790                 /* set default 1 bit mode */
791                 slot->ctype = SDMMC_CTYPE_1BIT;
792         }
793
794         regs = mci_readl(slot->host, UHS_REG);
795
796         /* DDR mode set */
797         if (ios->timing == MMC_TIMING_UHS_DDR50)
798                 regs |= (0x1 << slot->id) << 16;
799         else
800                 regs &= ~(0x1 << slot->id) << 16;
801
802         mci_writel(slot->host, UHS_REG, regs);
803
804         if (ios->clock) {
805                 /*
806                  * Use mirror of ios->clock to prevent race with mmc
807                  * core ios update when finding the minimum.
808                  */
809                 slot->clock = ios->clock;
810         }
811
812         if (drv_data && drv_data->set_ios)
813                 drv_data->set_ios(slot->host, ios);
814
815         /* Slot specific timing and width adjustment */
816         dw_mci_setup_bus(slot, false);
817
818         switch (ios->power_mode) {
819         case MMC_POWER_UP:
820                 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
821                 break;
822         default:
823                 break;
824         }
825 }
826
/*
 * mmc_host_ops.get_ro: report write-protect state.  Precedence:
 * slot quirk (never protected) > platform get_ro hook > WP gpio >
 * the controller's WRTPRT register.  Returns 1 if read-only.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
        int read_only;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci_board *brd = slot->host->pdata;

        /* Use platform get_ro function, else try on board write protect */
        if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
                read_only = 0;
        else if (brd->get_ro)
                read_only = brd->get_ro(slot->id);
        else if (gpio_is_valid(slot->wp_gpio))
                read_only = gpio_get_value(slot->wp_gpio);
        else
                read_only =
                        mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

        dev_dbg(&mmc->class_dev, "card is %s\n",
                read_only ? "read-only" : "read-write");

        return read_only;
}
849
850 static int dw_mci_get_cd(struct mmc_host *mmc)
851 {
852         int present;
853         struct dw_mci_slot *slot = mmc_priv(mmc);
854         struct dw_mci_board *brd = slot->host->pdata;
855
856         /* Use platform get_cd function, else try onboard card detect */
857         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
858                 present = 1;
859         else if (brd->get_cd)
860                 present = !brd->get_cd(slot->id);
861         else
862                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
863                         == 0 ? 1 : 0;
864
865         if (present)
866                 dev_dbg(&mmc->class_dev, "card is present\n");
867         else
868                 dev_dbg(&mmc->class_dev, "card is not present\n");
869
870         return present;
871 }
872
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle.  According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
        struct dw_mci *host = slot->host;
        u32 clk_en_a;
        const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

        clk_en_a = mci_readl(host, CLKENA);

        if (clk_en_a & clken_low_pwr) {
                mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
                /* CLKENA changes only take effect after a clock-update cmd */
                mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                             SDMMC_CMD_PRV_DAT_WAIT, 0);
        }
}
896
897 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
898 {
899         struct dw_mci_slot *slot = mmc_priv(mmc);
900         struct dw_mci *host = slot->host;
901         u32 int_mask;
902
903         /* Enable/disable Slot Specific SDIO interrupt */
904         int_mask = mci_readl(host, INTMASK);
905         if (enb) {
906                 /*
907                  * Turn off low power mode if it was enabled.  This is a bit of
908                  * a heavy operation and we disable / enable IRQs a lot, so
909                  * we'll leave low power mode disabled and it will get
910                  * re-enabled again in dw_mci_setup_bus().
911                  */
912                 dw_mci_disable_low_power(slot);
913
914                 mci_writel(host, INTMASK,
915                            (int_mask | SDMMC_INT_SDIO(slot->id)));
916         } else {
917                 mci_writel(host, INTMASK,
918                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
919         }
920 }
921
/* Host operations handed to the MMC core for every dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
        .request                = dw_mci_request,
        .pre_req                = dw_mci_pre_req,
        .post_req               = dw_mci_post_req,
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
};
931
/*
 * Finish the current request and, if another slot is queued, start its
 * request.  Called with host->lock held; the lock is dropped around
 * mmc_request_done() because that callback may re-enter the driver.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
{
        struct dw_mci_slot *slot;
        struct mmc_host *prev_mmc = host->cur_slot->mmc;

        WARN_ON(host->cmd || host->data);

        host->cur_slot->mrq = NULL;
        host->mrq = NULL;
        if (!list_empty(&host->queue)) {
                /* Another slot is waiting: dequeue it and kick off its command. */
                slot = list_entry(host->queue.next,
                                  struct dw_mci_slot, queue_node);
                list_del(&slot->queue_node);
                dev_vdbg(host->dev, "list not empty: %s is next\n",
                         mmc_hostname(slot->mmc));
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                dev_vdbg(host->dev, "list empty\n");
                host->state = STATE_IDLE;
        }

        /* Drop the lock while notifying the core; it may submit a new request. */
        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
}
960
961 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
962 {
963         u32 status = host->cmd_status;
964
965         host->cmd_status = 0;
966
967         /* Read the response from the card (up to 16 bytes) */
968         if (cmd->flags & MMC_RSP_PRESENT) {
969                 if (cmd->flags & MMC_RSP_136) {
970                         cmd->resp[3] = mci_readl(host, RESP0);
971                         cmd->resp[2] = mci_readl(host, RESP1);
972                         cmd->resp[1] = mci_readl(host, RESP2);
973                         cmd->resp[0] = mci_readl(host, RESP3);
974                 } else {
975                         cmd->resp[0] = mci_readl(host, RESP0);
976                         cmd->resp[1] = 0;
977                         cmd->resp[2] = 0;
978                         cmd->resp[3] = 0;
979                 }
980         }
981
982         if (status & SDMMC_INT_RTO)
983                 cmd->error = -ETIMEDOUT;
984         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
985                 cmd->error = -EILSEQ;
986         else if (status & SDMMC_INT_RESP_ERR)
987                 cmd->error = -EIO;
988         else
989                 cmd->error = 0;
990
991         if (cmd->error) {
992                 /* newer ip versions need a delay between retries */
993                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
994                         mdelay(20);
995
996                 if (cmd->data) {
997                         dw_mci_stop_dma(host);
998                         host->data = NULL;
999                 }
1000         }
1001 }
1002
/*
 * Request state machine, run in tasklet context.  Walks the current
 * request through SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP (with DATA_ERROR as a detour), looping until the state
 * stops changing.  Events are posted into host->pending_events by the
 * IRQ handler; the switch cases deliberately fall through so a single
 * invocation can advance several states.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
        struct dw_mci *host = (struct dw_mci *)priv;
        struct mmc_data *data;
        struct mmc_command *cmd;
        enum dw_mci_state state;
        enum dw_mci_state prev_state;
        u32 status, ctrl;

        spin_lock(&host->lock);

        state = host->state;
        data = host->data;

        do {
                prev_state = state;

                switch (state) {
                case STATE_IDLE:
                        break;

                case STATE_SENDING_CMD:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        cmd = host->cmd;
                        host->cmd = NULL;
                        set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
                        dw_mci_command_complete(host, cmd);
                        /* sbc (set block count) done: now issue the real command. */
                        if (cmd == host->mrq->sbc && !cmd->error) {
                                prev_state = state = STATE_SENDING_CMD;
                                __dw_mci_start_request(host, host->cur_slot,
                                                       host->mrq->cmd);
                                goto unlock;
                        }

                        /* No data phase, or command failed: finish the request. */
                        if (!host->mrq->data || cmd->error) {
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        prev_state = state = STATE_SENDING_DATA;
                        /* fall through */

                case STATE_SENDING_DATA:
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
                                /* Abort DMA and stop the card before cleanup. */
                                dw_mci_stop_dma(host);
                                if (data->stop)
                                        send_stop_cmd(host, data);
                                state = STATE_DATA_ERROR;
                                break;
                        }

                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
                        prev_state = state = STATE_DATA_BUSY;
                        /* fall through */

                case STATE_DATA_BUSY:
                        if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
                                                &host->pending_events))
                                break;

                        host->data = NULL;
                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
                        status = host->data_status;

                        if (status & DW_MCI_DATA_ERROR_FLAGS) {
                                if (status & SDMMC_INT_DTO) {
                                        data->error = -ETIMEDOUT;
                                } else if (status & SDMMC_INT_DCRC) {
                                        data->error = -EILSEQ;
                                } else if (status & SDMMC_INT_EBE &&
                                           host->dir_status ==
                                                        DW_MCI_SEND_STATUS) {
                                        /*
                                         * No data CRC status was returned.
                                         * The number of bytes transferred will
                                         * be exaggerated in PIO mode.
                                         */
                                        data->bytes_xfered = 0;
                                        data->error = -ETIMEDOUT;
                                } else {
                                        dev_err(host->dev,
                                                "data FIFO error "
                                                "(status=%08x)\n",
                                                status);
                                        data->error = -EIO;
                                }
                                /*
                                 * After an error, there may be data lingering
                                 * in the FIFO, so reset it - doing so
                                 * generates a block interrupt, hence setting
                                 * the scatter-gather pointer to NULL.
                                 */
                                sg_miter_stop(&host->sg_miter);
                                host->sg = NULL;
                                ctrl = mci_readl(host, CTRL);
                                ctrl |= SDMMC_CTRL_FIFO_RESET;
                                mci_writel(host, CTRL, ctrl);
                        } else {
                                data->bytes_xfered = data->blocks * data->blksz;
                                data->error = 0;
                        }

                        if (!data->stop) {
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        /* With sbc the stop is implicit; don't send CMD12. */
                        if (host->mrq->sbc && !data->error) {
                                data->stop->error = 0;
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        prev_state = state = STATE_SENDING_STOP;
                        if (!data->error)
                                send_stop_cmd(host, data);
                        /* fall through */

                case STATE_SENDING_STOP:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        host->cmd = NULL;
                        dw_mci_command_complete(host, host->mrq->stop);
                        dw_mci_request_end(host, host->mrq);
                        goto unlock;

                case STATE_DATA_ERROR:
                        /* Wait for the transfer to wind down before cleanup. */
                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        state = STATE_DATA_BUSY;
                        break;
                }
        } while (state != prev_state);

        host->state = state;
unlock:
        spin_unlock(&host->lock);

}
1154
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
        /* Stage the trailing sub-word bytes until a full FIFO word exists. */
        memcpy((void *)&host->part_buf, buf, cnt);
        host->part_buf_count = cnt;
}
1161
1162 /* append bytes to part_buf, only use during push */
1163 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1164 {
1165         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1166         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1167         host->part_buf_count += cnt;
1168         return cnt;
1169 }
1170
1171 /* pull first bytes from part_buf, only use during pull */
1172 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1173 {
1174         cnt = min(cnt, (int)host->part_buf_count);
1175         if (cnt) {
1176                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1177                        cnt);
1178                 host->part_buf_count -= cnt;
1179                 host->part_buf_start += cnt;
1180         }
1181         return cnt;
1182 }
1183
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
        memcpy(buf, &host->part_buf, cnt);
        /* Record where the unread remainder of the staged word begins. */
        host->part_buf_start = cnt;
        host->part_buf_count = (1 << host->data_shift) - cnt;
}
1191
/*
 * PIO write path for a 16-bit-wide FIFO: push cnt bytes from buf,
 * staging odd leftover bytes in host->part_buf so only whole 16-bit
 * words are ever written to the DATA register.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                /* Flush when the staged word is full or this is the last sg. */
                if (!sg_next(host->sg) || host->part_buf_count == 2) {
                        mci_writew(host, DATA(host->data_offset),
                                        host->part_buf16);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Misaligned source: bounce through a stack-aligned buffer. */
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writew(host, DATA(host->data_offset),
                                                aligned_buf[i]);
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        mci_writew(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Last sg entry: nothing more is coming, flush the partial word. */
                if (!sg_next(host->sg))
                        mci_writew(host, DATA(host->data_offset),
                                        host->part_buf16);
        }
}
1237
/*
 * PIO read path for a 16-bit-wide FIFO: pull cnt bytes into buf; any
 * trailing odd byte of the final FIFO word is parked in host->part_buf.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Misaligned destination: bounce through a stack-aligned buffer. */
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        /* pull data from fifo into aligned buffer */
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readw(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        *pdata++ = mci_readw(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Read one more word; leftover bytes stay staged in part_buf. */
                host->part_buf16 = mci_readw(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1269
/*
 * PIO write path for a 32-bit-wide FIFO: push cnt bytes from buf,
 * staging leftover bytes in host->part_buf so only whole 32-bit words
 * are ever written to the DATA register.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                /* Flush when the staged word is full or this is the last sg. */
                if (!sg_next(host->sg) || host->part_buf_count == 4) {
                        mci_writel(host, DATA(host->data_offset),
                                        host->part_buf32);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Misaligned source: bounce through a stack-aligned buffer. */
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writel(host, DATA(host->data_offset),
                                                aligned_buf[i]);
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        mci_writel(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Last sg entry: nothing more is coming, flush the partial word. */
                if (!sg_next(host->sg))
                        mci_writel(host, DATA(host->data_offset),
                                                host->part_buf32);
        }
}
1315
/*
 * PIO read path for a 32-bit-wide FIFO: pull cnt bytes into buf; any
 * trailing bytes of the final FIFO word are parked in host->part_buf.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Misaligned destination: bounce through a stack-aligned buffer. */
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        /* pull data from fifo into aligned buffer */
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readl(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        *pdata++ = mci_readl(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Read one more word; leftover bytes stay staged in part_buf. */
                host->part_buf32 = mci_readl(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1347
1348 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1349 {
1350         /* try and push anything in the part_buf */
1351         if (unlikely(host->part_buf_count)) {
1352                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1353                 buf += len;
1354                 cnt -= len;
1355                 if (!sg_next(host->sg) || host->part_buf_count == 8) {
1356                         mci_writew(host, DATA(host->data_offset),
1357                                         host->part_buf);
1358                         host->part_buf_count = 0;
1359                 }
1360         }
1361 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1362         if (unlikely((unsigned long)buf & 0x7)) {
1363                 while (cnt >= 8) {
1364                         u64 aligned_buf[16];
1365                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1366                         int items = len >> 3;
1367                         int i;
1368                         /* memcpy from input buffer into aligned buffer */
1369                         memcpy(aligned_buf, buf, len);
1370                         buf += len;
1371                         cnt -= len;
1372                         /* push data from aligned buffer into fifo */
1373                         for (i = 0; i < items; ++i)
1374                                 mci_writeq(host, DATA(host->data_offset),
1375                                                 aligned_buf[i]);
1376                 }
1377         } else
1378 #endif
1379         {
1380                 u64 *pdata = buf;
1381                 for (; cnt >= 8; cnt -= 8)
1382                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1383                 buf = pdata;
1384         }
1385         /* put anything remaining in the part_buf */
1386         if (cnt) {
1387                 dw_mci_set_part_bytes(host, buf, cnt);
1388                 if (!sg_next(host->sg))
1389                         mci_writeq(host, DATA(host->data_offset),
1390                                         host->part_buf);
1391         }
1392 }
1393
/*
 * PIO read path for a 64-bit-wide FIFO: pull cnt bytes into buf; any
 * trailing bytes of the final FIFO word are parked in host->part_buf.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Misaligned destination: bounce through a stack-aligned buffer. */
        if (unlikely((unsigned long)buf & 0x7)) {
                while (cnt >= 8) {
                        /* pull data from fifo into aligned buffer */
                        u64 aligned_buf[16];
                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
                        int items = len >> 3;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readq(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u64 *pdata = buf;
                for (; cnt >= 8; cnt -= 8)
                        *pdata++ = mci_readq(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Read one more word; leftover bytes stay staged in part_buf. */
                host->part_buf = mci_readq(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1425
1426 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1427 {
1428         int len;
1429
1430         /* get remaining partial bytes */
1431         len = dw_mci_pull_part_bytes(host, buf, cnt);
1432         if (unlikely(len == cnt))
1433                 return;
1434         buf += len;
1435         cnt -= len;
1436
1437         /* get the rest of the data */
1438         host->pull_data(host, buf, cnt);
1439 }
1440
/*
 * PIO receive: drain the controller FIFO into the current scatterlist.
 * Called from the interrupt handler while RXDR/DATA_OVER is pending;
 * loops as long as the controller keeps raising RXDR.
 */
static void dw_mci_read_data_pio(struct dw_mci *host)
{
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        void *buf;
        unsigned int offset;
        struct mmc_data *data = host->data;
        int shift = host->data_shift;
        u32 status;
        unsigned int nbytes = 0, len;
        unsigned int remain, fcnt;

        do {
                if (!sg_miter_next(sg_miter))
                        goto done;

                host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;

                do {
                        /* Available bytes = FIFO word count scaled up, plus staged bytes. */
                        fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
                                        << shift) + host->part_buf_count;
                        len = min(remain, fcnt);
                        if (!len)
                                break;
                        dw_mci_pull_data(host, (void *)(buf + offset), len);
                        offset += len;
                        nbytes += len;
                        remain -= len;
                } while (remain);

                sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
        } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
        data->bytes_xfered += nbytes;

        if (!remain) {
                /* Current sg entry fully consumed; advance to the next one. */
                if (!sg_miter_next(sg_miter))
                        goto done;
                sg_miter->consumed = 0;
        }
        sg_miter_stop(sg_miter);
        return;

done:
        /* Scatterlist exhausted: transfer is complete from the PIO side. */
        data->bytes_xfered += nbytes;
        sg_miter_stop(sg_miter);
        host->sg = NULL;
        /* Publish sg/bytes_xfered updates before setting the event bit. */
        smp_wmb();
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1494
/*
 * PIO transmit: fill the controller FIFO from the current scatterlist.
 * Called from the interrupt handler while TXDR is pending; loops as
 * long as the controller keeps raising TXDR.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        void *buf;
        unsigned int offset;
        struct mmc_data *data = host->data;
        int shift = host->data_shift;
        u32 status;
        unsigned int nbytes = 0, len;
        unsigned int fifo_depth = host->fifo_depth;
        unsigned int remain, fcnt;

        do {
                if (!sg_miter_next(sg_miter))
                        goto done;

                host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;

                do {
                        /* Free space = empty FIFO words scaled up, minus staged bytes. */
                        fcnt = ((fifo_depth -
                                 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
                                        << shift) - host->part_buf_count;
                        len = min(remain, fcnt);
                        if (!len)
                                break;
                        host->push_data(host, (void *)(buf + offset), len);
                        offset += len;
                        nbytes += len;
                        remain -= len;
                } while (remain);

                sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
        } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
        data->bytes_xfered += nbytes;

        if (!remain) {
                /* Current sg entry fully consumed; advance to the next one. */
                if (!sg_miter_next(sg_miter))
                        goto done;
                sg_miter->consumed = 0;
        }
        sg_miter_stop(sg_miter);
        return;

done:
        /* Scatterlist exhausted: transfer is complete from the PIO side. */
        data->bytes_xfered += nbytes;
        sg_miter_stop(sg_miter);
        host->sg = NULL;
        /* Publish sg/bytes_xfered updates before setting the event bit. */
        smp_wmb();
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1550
/* Latch a command-done status and defer processing to the tasklet. */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
        /* Keep the first recorded status; don't overwrite an earlier error. */
        if (!host->cmd_status)
                host->cmd_status = status;

        /* Make cmd_status visible before the completion bit is observed. */
        smp_wmb();

        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
}
1561
/*
 * Top-level interrupt handler.  Reads the masked status, acknowledges
 * each source in RINTSTS, records state for the tasklet, and services
 * PIO data movement inline.  Re-polls up to 5 times to catch status
 * bits that assert while earlier ones are being handled.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
        struct dw_mci *host = dev_id;
        u32 pending;
        unsigned int pass_count = 0;
        int i;

        do {
                pending = mci_readl(host, MINTSTS); /* read-only mask reg */

                /*
                 * DTO fix - version 2.10a and below, and only if internal DMA
                 * is configured.
                 */
                if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
                        if (!pending &&
                            ((mci_readl(host, STATUS) >> 17) & 0x1fff))
                                pending |= SDMMC_INT_DATA_OVER;
                }

                if (!pending)
                        break;

                if (pending & DW_MCI_CMD_ERROR_FLAGS) {
                        mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
                        host->cmd_status = pending;
                        /* Publish cmd_status before the completion event bit. */
                        smp_wmb();
                        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
                }

                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb();
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
                }

                if (pending & SDMMC_INT_DATA_OVER) {
                        mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
                        /* Don't clobber an error status recorded above. */
                        if (!host->data_status)
                                host->data_status = pending;
                        smp_wmb();
                        if (host->dir_status == DW_MCI_RECV_STATUS) {
                                /* Drain whatever is left in the FIFO (PIO reads). */
                                if (host->sg != NULL)
                                        dw_mci_read_data_pio(host);
                        }
                        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
                }

                if (pending & SDMMC_INT_RXDR) {
                        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
                        if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
                                dw_mci_read_data_pio(host);
                }

                if (pending & SDMMC_INT_TXDR) {
                        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
                        if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
                                dw_mci_write_data_pio(host);
                }

                if (pending & SDMMC_INT_CMD_DONE) {
                        mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
                        dw_mci_cmd_interrupt(host, pending);
                }

                if (pending & SDMMC_INT_CD) {
                        /* Card insert/remove: handled in process context. */
                        mci_writel(host, RINTSTS, SDMMC_INT_CD);
                        queue_work(host->card_workqueue, &host->card_work);
                }

                /* Handle SDIO Interrupts */
                for (i = 0; i < host->num_slots; i++) {
                        struct dw_mci_slot *slot = host->slot[i];
                        if (pending & SDMMC_INT_SDIO(i)) {
                                mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
                                mmc_signal_sdio_irq(slot->mmc);
                        }
                }

        } while (pass_count++ < 5);

#ifdef CONFIG_MMC_DW_IDMAC
        /* Handle DMA interrupts */
        pending = mci_readl(host, IDSTS);
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
                host->dma_ops->complete(host);
        }
#endif

        return IRQ_HANDLED;
}
1659
/*
 * Card-detect workqueue handler.
 *
 * For every slot, loops until the observed card-detect state matches the
 * recorded one (the card may change again while we work).  On insertion
 * the slot is powered up; on removal any in-flight or queued request is
 * failed with -ENOMEDIUM, the FIFO and (under CONFIG_MMC_DW_IDMAC) the
 * internal DMA controller are reset, and the slot is powered down.
 * Finally mmc_detect_change() tells the MMC core to rescan the slot.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
        struct dw_mci *host = container_of(work, struct dw_mci, card_work);
        int i;

        for (i = 0; i < host->num_slots; i++) {
                struct dw_mci_slot *slot = host->slot[i];
                struct mmc_host *mmc = slot->mmc;
                struct mmc_request *mrq;
                int present;
                u32 ctrl;

                present = dw_mci_get_cd(mmc);
                while (present != slot->last_detect_state) {
                        dev_dbg(&slot->mmc->class_dev, "card %s\n",
                                present ? "inserted" : "removed");

                        /* Power up slot (before spin_lock, may sleep) */
                        if (present != 0 && host->pdata->setpower)
                                host->pdata->setpower(slot->id, mmc->ocr_avail);

                        spin_lock_bh(&host->lock);

                        /* Card change detected */
                        slot->last_detect_state = present;

                        /* Mark card as present if applicable */
                        if (present != 0)
                                set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

                        /* Clean up queue if present */
                        mrq = slot->mrq;
                        if (mrq) {
                                if (mrq == host->mrq) {
                                        /*
                                         * Request is active on the hardware:
                                         * fail it according to how far the
                                         * state machine has progressed.
                                         */
                                        host->data = NULL;
                                        host->cmd = NULL;

                                        switch (host->state) {
                                        case STATE_IDLE:
                                                break;
                                        case STATE_SENDING_CMD:
                                                mrq->cmd->error = -ENOMEDIUM;
                                                if (!mrq->data)
                                                        break;
                                                /* fall through */
                                        case STATE_SENDING_DATA:
                                                mrq->data->error = -ENOMEDIUM;
                                                dw_mci_stop_dma(host);
                                                break;
                                        case STATE_DATA_BUSY:
                                        case STATE_DATA_ERROR:
                                                /* NOTE(review): assumes
                                                 * mrq->data is non-NULL in
                                                 * these states - confirm. */
                                                if (mrq->data->error == -EINPROGRESS)
                                                        mrq->data->error = -ENOMEDIUM;
                                                if (!mrq->stop)
                                                        break;
                                                /* fall through */
                                        case STATE_SENDING_STOP:
                                                mrq->stop->error = -ENOMEDIUM;
                                                break;
                                        }

                                        dw_mci_request_end(host, mrq);
                                } else {
                                        /*
                                         * Request is still queued: unlink it
                                         * and complete with -ENOMEDIUM.  The
                                         * lock is dropped around
                                         * mmc_request_done().
                                         */
                                        list_del(&slot->queue_node);
                                        mrq->cmd->error = -ENOMEDIUM;
                                        if (mrq->data)
                                                mrq->data->error = -ENOMEDIUM;
                                        if (mrq->stop)
                                                mrq->stop->error = -ENOMEDIUM;

                                        spin_unlock(&host->lock);
                                        mmc_request_done(slot->mmc, mrq);
                                        spin_lock(&host->lock);
                                }
                        }

                        /* Power down slot */
                        if (present == 0) {
                                clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

                                /*
                                 * Clear down the FIFO - doing so generates a
                                 * block interrupt, hence setting the
                                 * scatter-gather pointer to NULL.
                                 */
                                sg_miter_stop(&host->sg_miter);
                                host->sg = NULL;

                                ctrl = mci_readl(host, CTRL);
                                ctrl |= SDMMC_CTRL_FIFO_RESET;
                                mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
                                ctrl = mci_readl(host, BMOD);
                                /* Software reset of DMA */
                                ctrl |= SDMMC_IDMAC_SWRESET;
                                mci_writel(host, BMOD, ctrl);
#endif

                        }

                        spin_unlock_bh(&host->lock);

                        /* Power down slot (after spin_unlock, may sleep) */
                        if (present == 0 && host->pdata->setpower)
                                host->pdata->setpower(slot->id, 0);

                        present = dw_mci_get_cd(mmc);
                }

                mmc_detect_change(slot->mmc,
                        msecs_to_jiffies(host->pdata->detect_delay_ms));
        }
}
1774
1775 #ifdef CONFIG_OF
1776 /* given a slot id, find out the device node representing that slot */
1777 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1778 {
1779         struct device_node *np;
1780         const __be32 *addr;
1781         int len;
1782
1783         if (!dev || !dev->of_node)
1784                 return NULL;
1785
1786         for_each_child_of_node(dev->of_node, np) {
1787                 addr = of_get_property(np, "reg", &len);
1788                 if (!addr || (len < sizeof(int)))
1789                         continue;
1790                 if (be32_to_cpup(addr) == slot)
1791                         return np;
1792         }
1793         return NULL;
1794 }
1795
/*
 * Mapping of per-slot device-tree properties to slot quirk flags,
 * scanned by dw_mci_of_get_slot_quirks().
 */
static struct dw_mci_of_slot_quirks {
        char *quirk;    /* device-tree property name to look for */
        int id;         /* quirk flag OR-ed in when the property exists */
} of_slot_quirks[] = {
        {
                .quirk  = "disable-wp",
                .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
        },
};
1805
1806 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
1807 {
1808         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1809         int quirks = 0;
1810         int idx;
1811
1812         /* get quirks */
1813         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
1814                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
1815                         quirks |= of_slot_quirks[idx].id;
1816
1817         return quirks;
1818 }
1819
1820 /* find out bus-width for a given slot */
1821 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1822 {
1823         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1824         u32 bus_wd = 1;
1825
1826         if (!np)
1827                 return 1;
1828
1829         if (of_property_read_u32(np, "bus-width", &bus_wd))
1830                 dev_err(dev, "bus-width property not found, assuming width"
1831                                " as 1\n");
1832         return bus_wd;
1833 }
1834
1835 /* find the write protect gpio for a given slot; or -1 if none specified */
1836 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
1837 {
1838         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1839         int gpio;
1840
1841         if (!np)
1842                 return -EINVAL;
1843
1844         gpio = of_get_named_gpio(np, "wp-gpios", 0);
1845
1846         /* Having a missing entry is valid; return silently */
1847         if (!gpio_is_valid(gpio))
1848                 return -EINVAL;
1849
1850         if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
1851                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
1852                 return -EINVAL;
1853         }
1854
1855         return gpio;
1856 }
1857 #else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree, so no per-slot quirks */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
        return 0;
}
/* !CONFIG_OF stub: default to a 1-bit bus */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
        return 1;
}
/* !CONFIG_OF stub: no slot node available */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
        return NULL;
}
/* !CONFIG_OF stub: no write-protect gpio available */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
        return -EINVAL;
}
1874 #endif /* CONFIG_OF */
1875
/*
 * Allocate and register one mmc_host for slot @id and hook it up to
 * @host.  Bus width, OCR mask, capabilities and block-layer limits are
 * taken from platform data, device tree, or driver defaults, in that
 * order of precedence.  Returns 0 on success, -ENOMEM if the host
 * cannot be allocated, or -EINVAL if the implementation-specific bus
 * setup fails.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
        struct mmc_host *mmc;
        struct dw_mci_slot *slot;
        const struct dw_mci_drv_data *drv_data = host->drv_data;
        int ctrl_id, ret;
        u8 bus_width;

        mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->id = id;
        slot->mmc = mmc;
        slot->host = host;
        host->slot[id] = slot;

        slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

        mmc->ops = &dw_mci_ops;
        /* 510 is the largest clock divider applied to bus_hz here */
        mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
        mmc->f_max = host->bus_hz;

        if (host->pdata->get_ocr)
                mmc->ocr_avail = host->pdata->get_ocr(id);
        else
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /*
         * Start with slot power disabled, it will be enabled when a card
         * is detected.
         */
        if (host->pdata->setpower)
                host->pdata->setpower(id, 0);

        if (host->pdata->caps)
                mmc->caps = host->pdata->caps;

        if (host->pdata->pm_caps)
                mmc->pm_caps = host->pdata->pm_caps;

        /* Controller index: the "mshc" DT alias, or the platform device id */
        if (host->dev->of_node) {
                ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
                if (ctrl_id < 0)
                        ctrl_id = 0;
        } else {
                ctrl_id = to_platform_device(host->dev)->id;
        }
        if (drv_data && drv_data->caps)
                mmc->caps |= drv_data->caps[ctrl_id];

        if (host->pdata->caps2)
                mmc->caps2 = host->pdata->caps2;

        /* Bus width: platform callback first, then device tree, then 1 bit */
        if (host->pdata->get_bus_wd)
                bus_width = host->pdata->get_bus_wd(slot->id);
        else if (host->dev->of_node)
                bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
        else
                bus_width = 1;

        if (drv_data && drv_data->setup_bus) {
                struct device_node *slot_np;
                slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
                ret = drv_data->setup_bus(host, slot_np, bus_width);
                if (ret)
                        goto err_setup_bus;
        }

        switch (bus_width) {
        case 8:
                mmc->caps |= MMC_CAP_8_BIT_DATA;
                /* fall through - an 8-bit bus also supports 4-bit mode */
        case 4:
                mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
                mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

        if (host->pdata->blk_settings) {
                mmc->max_segs = host->pdata->blk_settings->max_segs;
                mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
                mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
                mmc->max_req_size = host->pdata->blk_settings->max_req_size;
                mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
        } else {
                /* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
                mmc->max_segs = host->ring_size;
                mmc->max_blk_size = 65536;
                mmc->max_blk_count = host->ring_size;
                mmc->max_seg_size = 0x1000;
                mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
                mmc->max_segs = 64;
                mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
                mmc->max_blk_count = 512;
                mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
                mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
        }

        /* The vmmc regulator is optional; absence just means no control */
        host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
        if (IS_ERR(host->vmmc)) {
                pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
                host->vmmc = NULL;
        } else
                /* NOTE(review): regulator_enable() return value is ignored
                 * here - confirm whether a failure should abort slot init. */
                regulator_enable(host->vmmc);

        if (dw_mci_get_cd(mmc))
                set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
        else
                clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

        slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

        mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
        dw_mci_init_debugfs(slot);
#endif

        /* Card initially undetected */
        slot->last_detect_state = 0;

        /*
         * Card may have been plugged in prior to boot so we
         * need to run the detect tasklet
         */
        queue_work(host->card_workqueue, &host->card_work);

        return 0;

err_setup_bus:
        mmc_free_host(mmc);
        return -EINVAL;
}
2014
2015 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2016 {
2017         /* Shutdown detect IRQ */
2018         if (slot->host->pdata->exit)
2019                 slot->host->pdata->exit(id);
2020
2021         /* Debugfs stuff is cleaned up by mmc core */
2022         mmc_remove_host(slot->mmc);
2023         slot->host->slot[id] = NULL;
2024         mmc_free_host(slot->mmc);
2025 }
2026
2027 static void dw_mci_init_dma(struct dw_mci *host)
2028 {
2029         /* Alloc memory for sg translation */
2030         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2031                                           &host->sg_dma, GFP_KERNEL);
2032         if (!host->sg_cpu) {
2033                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2034                         __func__);
2035                 goto no_dma;
2036         }
2037
2038         /* Determine which DMA interface to use */
2039 #ifdef CONFIG_MMC_DW_IDMAC
2040         host->dma_ops = &dw_mci_idmac_ops;
2041         dev_info(host->dev, "Using internal DMA controller.\n");
2042 #endif
2043
2044         if (!host->dma_ops)
2045                 goto no_dma;
2046
2047         if (host->dma_ops->init && host->dma_ops->start &&
2048             host->dma_ops->stop && host->dma_ops->cleanup) {
2049                 if (host->dma_ops->init(host)) {
2050                         dev_err(host->dev, "%s: Unable to initialize "
2051                                 "DMA Controller.\n", __func__);
2052                         goto no_dma;
2053                 }
2054         } else {
2055                 dev_err(host->dev, "DMA initialization not found.\n");
2056                 goto no_dma;
2057         }
2058
2059         host->use_dma = 1;
2060         return;
2061
2062 no_dma:
2063         dev_info(host->dev, "Using PIO mode.\n");
2064         host->use_dma = 0;
2065         return;
2066 }
2067
2068 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2069 {
2070         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2071         unsigned int ctrl;
2072
2073         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2074                                 SDMMC_CTRL_DMA_RESET));
2075
2076         /* wait till resets clear */
2077         do {
2078                 ctrl = mci_readl(host, CTRL);
2079                 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2080                               SDMMC_CTRL_DMA_RESET)))
2081                         return true;
2082         } while (time_before(jiffies, timeout));
2083
2084         dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2085
2086         return false;
2087 }
2088
2089 #ifdef CONFIG_OF
/*
 * Mapping of controller-level device-tree properties to host quirk
 * flags, scanned by dw_mci_parse_dt().
 */
static struct dw_mci_of_quirks {
        char *quirk;    /* device-tree property name to look for */
        int id;         /* quirk flag OR-ed in when the property exists */
} of_quirks[] = {
        {
                .quirk  = "supports-highspeed",
                .id     = DW_MCI_QUIRK_HIGHSPEED,
        }, {
                .quirk  = "broken-cd",
                .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
        },
};
2102
2103 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2104 {
2105         struct dw_mci_board *pdata;
2106         struct device *dev = host->dev;
2107         struct device_node *np = dev->of_node;
2108         const struct dw_mci_drv_data *drv_data = host->drv_data;
2109         int idx, ret;
2110
2111         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2112         if (!pdata) {
2113                 dev_err(dev, "could not allocate memory for pdata\n");
2114                 return ERR_PTR(-ENOMEM);
2115         }
2116
2117         /* find out number of slots supported */
2118         if (of_property_read_u32(dev->of_node, "num-slots",
2119                                 &pdata->num_slots)) {
2120                 dev_info(dev, "num-slots property not found, "
2121                                 "assuming 1 slot is available\n");
2122                 pdata->num_slots = 1;
2123         }
2124
2125         /* get quirks */
2126         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2127                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2128                         pdata->quirks |= of_quirks[idx].id;
2129
2130         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2131                 dev_info(dev, "fifo-depth property not found, using "
2132                                 "value of FIFOTH register as default\n");
2133
2134         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2135
2136         if (drv_data && drv_data->parse_dt) {
2137                 ret = drv_data->parse_dt(host);
2138                 if (ret)
2139                         return ERR_PTR(ret);
2140         }
2141
2142         if (of_find_property(np, "keep-power-in-suspend", NULL))
2143                 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2144
2145         if (of_find_property(np, "enable-sdio-wakeup", NULL))
2146                 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2147
2148         return pdata;
2149 }
2150
2151 #else /* CONFIG_OF */
2152 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2153 {
2154         return ERR_PTR(-EINVAL);
2155 }
2156 #endif /* CONFIG_OF */
2157
2158 int dw_mci_probe(struct dw_mci *host)
2159 {
2160         const struct dw_mci_drv_data *drv_data = host->drv_data;
2161         int width, i, ret = 0;
2162         u32 fifo_size;
2163         int init_slots = 0;
2164
2165         if (!host->pdata) {
2166                 host->pdata = dw_mci_parse_dt(host);
2167                 if (IS_ERR(host->pdata)) {
2168                         dev_err(host->dev, "platform data not available\n");
2169                         return -EINVAL;
2170                 }
2171         }
2172
2173         if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2174                 dev_err(host->dev,
2175                         "Platform data must supply select_slot function\n");
2176                 return -ENODEV;
2177         }
2178
2179         host->biu_clk = devm_clk_get(host->dev, "biu");
2180         if (IS_ERR(host->biu_clk)) {
2181                 dev_dbg(host->dev, "biu clock not available\n");
2182         } else {
2183                 ret = clk_prepare_enable(host->biu_clk);
2184                 if (ret) {
2185                         dev_err(host->dev, "failed to enable biu clock\n");
2186                         return ret;
2187                 }
2188         }
2189
2190         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2191         if (IS_ERR(host->ciu_clk)) {
2192                 dev_dbg(host->dev, "ciu clock not available\n");
2193         } else {
2194                 ret = clk_prepare_enable(host->ciu_clk);
2195                 if (ret) {
2196                         dev_err(host->dev, "failed to enable ciu clock\n");
2197                         goto err_clk_biu;
2198                 }
2199         }
2200
2201         if (IS_ERR(host->ciu_clk))
2202                 host->bus_hz = host->pdata->bus_hz;
2203         else
2204                 host->bus_hz = clk_get_rate(host->ciu_clk);
2205
2206         if (drv_data && drv_data->setup_clock) {
2207                 ret = drv_data->setup_clock(host);
2208                 if (ret) {
2209                         dev_err(host->dev,
2210                                 "implementation specific clock setup failed\n");
2211                         goto err_clk_ciu;
2212                 }
2213         }
2214
2215         if (!host->bus_hz) {
2216                 dev_err(host->dev,
2217                         "Platform data must supply bus speed\n");
2218                 ret = -ENODEV;
2219                 goto err_clk_ciu;
2220         }
2221
2222         host->quirks = host->pdata->quirks;
2223
2224         spin_lock_init(&host->lock);
2225         INIT_LIST_HEAD(&host->queue);
2226
2227         /*
2228          * Get the host data width - this assumes that HCON has been set with
2229          * the correct values.
2230          */
2231         i = (mci_readl(host, HCON) >> 7) & 0x7;
2232         if (!i) {
2233                 host->push_data = dw_mci_push_data16;
2234                 host->pull_data = dw_mci_pull_data16;
2235                 width = 16;
2236                 host->data_shift = 1;
2237         } else if (i == 2) {
2238                 host->push_data = dw_mci_push_data64;
2239                 host->pull_data = dw_mci_pull_data64;
2240                 width = 64;
2241                 host->data_shift = 3;
2242         } else {
2243                 /* Check for a reserved value, and warn if it is */
2244                 WARN((i != 1),
2245                      "HCON reports a reserved host data width!\n"
2246                      "Defaulting to 32-bit access.\n");
2247                 host->push_data = dw_mci_push_data32;
2248                 host->pull_data = dw_mci_pull_data32;
2249                 width = 32;
2250                 host->data_shift = 2;
2251         }
2252
2253         /* Reset all blocks */
2254         if (!mci_wait_reset(host->dev, host))
2255                 return -ENODEV;
2256
2257         host->dma_ops = host->pdata->dma_ops;
2258         dw_mci_init_dma(host);
2259
2260         /* Clear the interrupts for the host controller */
2261         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2262         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2263
2264         /* Put in max timeout */
2265         mci_writel(host, TMOUT, 0xFFFFFFFF);
2266
2267         /*
2268          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2269          *                          Tx Mark = fifo_size / 2 DMA Size = 8
2270          */
2271         if (!host->pdata->fifo_depth) {
2272                 /*
2273                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2274                  * have been overwritten by the bootloader, just like we're
2275                  * about to do, so if you know the value for your hardware, you
2276                  * should put it in the platform data.
2277                  */
2278                 fifo_size = mci_readl(host, FIFOTH);
2279                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2280         } else {
2281                 fifo_size = host->pdata->fifo_depth;
2282         }
2283         host->fifo_depth = fifo_size;
2284         host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
2285                         ((fifo_size/2) << 0));
2286         mci_writel(host, FIFOTH, host->fifoth_val);
2287
2288         /* disable clock to CIU */
2289         mci_writel(host, CLKENA, 0);
2290         mci_writel(host, CLKSRC, 0);
2291
2292         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2293         host->card_workqueue = alloc_workqueue("dw-mci-card",
2294                         WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2295         if (!host->card_workqueue)
2296                 goto err_dmaunmap;
2297         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2298         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2299                                host->irq_flags, "dw-mci", host);
2300         if (ret)
2301                 goto err_workqueue;
2302
2303         if (host->pdata->num_slots)
2304                 host->num_slots = host->pdata->num_slots;
2305         else
2306                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2307
2308         /*
2309          * Enable interrupts for command done, data over, data empty, card det,
2310          * receive ready and error such as transmit, receive timeout, crc error
2311          */
2312         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2313         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2314                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2315                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2316         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2317
2318         dev_info(host->dev, "DW MMC controller at irq %d, "
2319                  "%d bit host data width, "
2320                  "%u deep fifo\n",
2321                  host->irq, width, fifo_size);
2322
2323         /* We need at least one slot to succeed */
2324         for (i = 0; i < host->num_slots; i++) {
2325                 ret = dw_mci_init_slot(host, i);
2326                 if (ret)
2327                         dev_dbg(host->dev, "slot %d init failed\n", i);
2328                 else
2329                         init_slots++;
2330         }
2331
2332         if (init_slots) {
2333                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2334         } else {
2335                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2336                                         "but failed on all\n", host->num_slots);
2337                 goto err_workqueue;
2338         }
2339
2340         /*
2341          * In 2.40a spec, Data offset is changed.
2342          * Need to check the version-id and set data-offset for DATA register.
2343          */
2344         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2345         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2346
2347         if (host->verid < DW_MMC_240A)
2348                 host->data_offset = DATA_OFFSET;
2349         else
2350                 host->data_offset = DATA_240A_OFFSET;
2351
2352         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2353                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2354
2355         return 0;
2356
2357 err_workqueue:
2358         destroy_workqueue(host->card_workqueue);
2359
2360 err_dmaunmap:
2361         if (host->use_dma && host->dma_ops->exit)
2362                 host->dma_ops->exit(host);
2363
2364         if (host->vmmc)
2365                 regulator_disable(host->vmmc);
2366
2367 err_clk_ciu:
2368         if (!IS_ERR(host->ciu_clk))
2369                 clk_disable_unprepare(host->ciu_clk);
2370
2371 err_clk_biu:
2372         if (!IS_ERR(host->biu_clk))
2373                 clk_disable_unprepare(host->biu_clk);
2374
2375         return ret;
2376 }
2377 EXPORT_SYMBOL(dw_mci_probe);
2378
2379 void dw_mci_remove(struct dw_mci *host)
2380 {
2381         int i;
2382
2383         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2384         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2385
2386         for (i = 0; i < host->num_slots; i++) {
2387                 dev_dbg(host->dev, "remove slot %d\n", i);
2388                 if (host->slot[i])
2389                         dw_mci_cleanup_slot(host->slot[i], i);
2390         }
2391
2392         /* disable clock to CIU */
2393         mci_writel(host, CLKENA, 0);
2394         mci_writel(host, CLKSRC, 0);
2395
2396         destroy_workqueue(host->card_workqueue);
2397
2398         if (host->use_dma && host->dma_ops->exit)
2399                 host->dma_ops->exit(host);
2400
2401         if (host->vmmc)
2402                 regulator_disable(host->vmmc);
2403
2404         if (!IS_ERR(host->ciu_clk))
2405                 clk_disable_unprepare(host->ciu_clk);
2406
2407         if (!IS_ERR(host->biu_clk))
2408                 clk_disable_unprepare(host->biu_clk);
2409 }
2410 EXPORT_SYMBOL(dw_mci_remove);
2411
2412
2413
2414 #ifdef CONFIG_PM_SLEEP
2415 /*
2416  * TODO: we should probably disable the clock to the card in the suspend path.
2417  */
2418 int dw_mci_suspend(struct dw_mci *host)
2419 {
2420         int i, ret = 0;
2421
2422         for (i = 0; i < host->num_slots; i++) {
2423                 struct dw_mci_slot *slot = host->slot[i];
2424                 if (!slot)
2425                         continue;
2426                 ret = mmc_suspend_host(slot->mmc);
2427                 if (ret < 0) {
2428                         while (--i >= 0) {
2429                                 slot = host->slot[i];
2430                                 if (slot)
2431                                         mmc_resume_host(host->slot[i]->mmc);
2432                         }
2433                         return ret;
2434                 }
2435         }
2436
2437         if (host->vmmc)
2438                 regulator_disable(host->vmmc);
2439
2440         return 0;
2441 }
2442 EXPORT_SYMBOL(dw_mci_suspend);
2443
2444 int dw_mci_resume(struct dw_mci *host)
2445 {
2446         int i, ret;
2447
2448         if (host->vmmc)
2449                 regulator_enable(host->vmmc);
2450
2451         if (!mci_wait_reset(host->dev, host)) {
2452                 ret = -ENODEV;
2453                 return ret;
2454         }
2455
2456         if (host->use_dma && host->dma_ops->init)
2457                 host->dma_ops->init(host);
2458
2459         /* Restore the old value at FIFOTH register */
2460         mci_writel(host, FIFOTH, host->fifoth_val);
2461
2462         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2463         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2464                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2465                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2466         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2467
2468         for (i = 0; i < host->num_slots; i++) {
2469                 struct dw_mci_slot *slot = host->slot[i];
2470                 if (!slot)
2471                         continue;
2472                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2473                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2474                         dw_mci_setup_bus(slot, true);
2475                 }
2476
2477                 ret = mmc_resume_host(host->slot[i]->mmc);
2478                 if (ret < 0)
2479                         return ret;
2480         }
2481         return 0;
2482 }
2483 EXPORT_SYMBOL(dw_mci_resume);
2484 #endif /* CONFIG_PM_SLEEP */
2485
2486 static int __init dw_mci_init(void)
2487 {
2488         printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver");
2489         return 0;
2490 }
2491
/* Module exit: nothing to do; devices are torn down via dw_mci_remove(). */
static void __exit dw_mci_exit(void)
{
}
2495
2496 module_init(dw_mci_init);
2497 module_exit(dw_mci_exit);
2498
2499 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2500 MODULE_AUTHOR("NXP Semiconductor VietNam");
2501 MODULE_AUTHOR("Imagination Technologies Ltd");
2502 MODULE_LICENSE("GPL v2");