/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sd.h>
33 #include <linux/mmc/sdio.h>
34 #include <linux/mmc/dw_mmc.h>
35 #include <linux/bitops.h>
36 #include <linux/regulator/consumer.h>
37 #include <linux/workqueue.h>
39 #include <linux/of_gpio.h>
40 #include <linux/mmc/slot-gpio.h>
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
/* Direction of the current data transfer, kept in host->dir_status */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers below this byte count are done in PIO, not DMA */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/* Hardware descriptor for the internal DMA controller (chained mode) */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
85 static const u8 tuning_blk_pattern_4bit[] = {
86 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
87 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
88 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
89 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
90 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
91 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
92 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
93 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
96 static const u8 tuning_blk_pattern_8bit[] = {
97 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
98 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
99 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
100 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
101 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
102 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
103 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
104 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
105 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
106 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
107 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
108 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
109 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
110 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
111 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
112 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
115 static bool dw_mci_reset(struct dw_mci *host);
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
153 spin_unlock_bh(&slot->host->lock);
158 static int dw_mci_req_open(struct inode *inode, struct file *file)
160 return single_open(file, dw_mci_req_show, inode->i_private);
163 static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
168 .release = single_release,
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
183 static int dw_mci_regs_open(struct inode *inode, struct file *file)
185 return single_open(file, dw_mci_regs_show, inode->i_private);
188 static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
193 .release = single_release,
196 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
203 root = mmc->debugfs_root;
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
236 #endif /* defined(CONFIG_DEBUG_FS) */
238 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
240 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
242 struct mmc_data *data;
243 struct dw_mci_slot *slot = mmc_priv(mmc);
244 struct dw_mci *host = slot->host;
245 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
247 cmd->error = -EINPROGRESS;
251 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
252 cmd->opcode == MMC_GO_IDLE_STATE ||
253 cmd->opcode == MMC_GO_INACTIVE_STATE ||
254 (cmd->opcode == SD_IO_RW_DIRECT &&
255 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
256 cmdr |= SDMMC_CMD_STOP;
257 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
258 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
260 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
263 /* Special bit makes CMD11 not die */
264 cmdr |= SDMMC_CMD_VOLT_SWITCH;
266 /* Change state to continue to handle CMD11 weirdness */
267 WARN_ON(slot->host->state != STATE_SENDING_CMD);
268 slot->host->state = STATE_SENDING_CMD11;
271 * We need to disable low power mode (automatic clock stop)
272 * while doing voltage switch so we don't confuse the card,
273 * since stopping the clock is a specific part of the UHS
274 * voltage change dance.
276 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
277 * unconditionally turned back on in dw_mci_setup_bus() if it's
278 * ever called with a non-zero clock. That shouldn't happen
279 * until the voltage change is all done.
281 clk_en_a = mci_readl(host, CLKENA);
282 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
283 mci_writel(host, CLKENA, clk_en_a);
284 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
285 SDMMC_CMD_PRV_DAT_WAIT, 0);
288 if (cmd->flags & MMC_RSP_PRESENT) {
289 /* We expect a response, so set this bit */
290 cmdr |= SDMMC_CMD_RESP_EXP;
291 if (cmd->flags & MMC_RSP_136)
292 cmdr |= SDMMC_CMD_RESP_LONG;
295 if (cmd->flags & MMC_RSP_CRC)
296 cmdr |= SDMMC_CMD_RESP_CRC;
300 cmdr |= SDMMC_CMD_DAT_EXP;
301 if (data->flags & MMC_DATA_STREAM)
302 cmdr |= SDMMC_CMD_STRM_MODE;
303 if (data->flags & MMC_DATA_WRITE)
304 cmdr |= SDMMC_CMD_DAT_WR;
307 if (drv_data && drv_data->prepare_command)
308 drv_data->prepare_command(slot->host, &cmdr);
313 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
315 struct mmc_command *stop;
321 stop = &host->stop_abort;
323 memset(stop, 0, sizeof(struct mmc_command));
325 if (cmdr == MMC_READ_SINGLE_BLOCK ||
326 cmdr == MMC_READ_MULTIPLE_BLOCK ||
327 cmdr == MMC_WRITE_BLOCK ||
328 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
329 stop->opcode = MMC_STOP_TRANSMISSION;
331 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
332 } else if (cmdr == SD_IO_RW_EXTENDED) {
333 stop->opcode = SD_IO_RW_DIRECT;
334 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
335 ((cmd->arg >> 28) & 0x7);
336 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
341 cmdr = stop->opcode | SDMMC_CMD_STOP |
342 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
347 static void dw_mci_start_command(struct dw_mci *host,
348 struct mmc_command *cmd, u32 cmd_flags)
352 "start command: ARGR=0x%08x CMDR=0x%08x\n",
353 cmd->arg, cmd_flags);
355 mci_writel(host, CMDARG, cmd->arg);
358 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
361 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
363 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
364 dw_mci_start_command(host, stop, host->stop_cmdr);
367 /* DMA interface functions */
368 static void dw_mci_stop_dma(struct dw_mci *host)
370 if (host->using_dma) {
371 host->dma_ops->stop(host);
372 host->dma_ops->cleanup(host);
375 /* Data transfer was stopped by the interrupt handler */
376 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
379 static int dw_mci_get_dma_dir(struct mmc_data *data)
381 if (data->flags & MMC_DATA_WRITE)
382 return DMA_TO_DEVICE;
384 return DMA_FROM_DEVICE;
387 #ifdef CONFIG_MMC_DW_IDMAC
388 static void dw_mci_dma_cleanup(struct dw_mci *host)
390 struct mmc_data *data = host->data;
393 if (!data->host_cookie)
394 dma_unmap_sg(host->dev,
397 dw_mci_get_dma_dir(data));
400 static void dw_mci_idmac_reset(struct dw_mci *host)
402 u32 bmod = mci_readl(host, BMOD);
403 /* Software reset of DMA */
404 bmod |= SDMMC_IDMAC_SWRESET;
405 mci_writel(host, BMOD, bmod);
408 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
412 /* Disable and reset the IDMAC interface */
413 temp = mci_readl(host, CTRL);
414 temp &= ~SDMMC_CTRL_USE_IDMAC;
415 temp |= SDMMC_CTRL_DMA_RESET;
416 mci_writel(host, CTRL, temp);
418 /* Stop the IDMAC running */
419 temp = mci_readl(host, BMOD);
420 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
421 temp |= SDMMC_IDMAC_SWRESET;
422 mci_writel(host, BMOD, temp);
425 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
427 struct mmc_data *data = host->data;
429 dev_vdbg(host->dev, "DMA complete\n");
431 host->dma_ops->cleanup(host);
434 * If the card was removed, data will be NULL. No point in trying to
435 * send the stop command or waiting for NBUSY in this case.
438 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
439 tasklet_schedule(&host->tasklet);
443 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
447 struct idmac_desc *desc = host->sg_cpu;
449 for (i = 0; i < sg_len; i++, desc++) {
450 unsigned int length = sg_dma_len(&data->sg[i]);
451 u32 mem_addr = sg_dma_address(&data->sg[i]);
453 /* Set the OWN bit and disable interrupts for this descriptor */
454 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
457 IDMAC_SET_BUFFER1_SIZE(desc, length);
459 /* Physical address to DMA to/from */
460 desc->des2 = mem_addr;
463 /* Set first descriptor */
465 desc->des0 |= IDMAC_DES0_FD;
467 /* Set last descriptor */
468 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
469 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
470 desc->des0 |= IDMAC_DES0_LD;
475 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
479 dw_mci_translate_sglist(host, host->data, sg_len);
481 /* Select IDMAC interface */
482 temp = mci_readl(host, CTRL);
483 temp |= SDMMC_CTRL_USE_IDMAC;
484 mci_writel(host, CTRL, temp);
488 /* Enable the IDMAC */
489 temp = mci_readl(host, BMOD);
490 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
491 mci_writel(host, BMOD, temp);
493 /* Start it running */
494 mci_writel(host, PLDMND, 1);
497 static int dw_mci_idmac_init(struct dw_mci *host)
499 struct idmac_desc *p;
502 /* Number of descriptors in the ring buffer */
503 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
505 /* Forward link the descriptor list */
506 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
507 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
509 /* Set the last descriptor as the end-of-ring descriptor */
510 p->des3 = host->sg_dma;
511 p->des0 = IDMAC_DES0_ER;
513 dw_mci_idmac_reset(host);
515 /* Mask out interrupts - get Tx & Rx complete only */
516 mci_writel(host, IDSTS, IDMAC_INT_CLR);
517 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
520 /* Set the descriptor base address */
521 mci_writel(host, DBADDR, host->sg_dma);
525 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
526 .init = dw_mci_idmac_init,
527 .start = dw_mci_idmac_start_dma,
528 .stop = dw_mci_idmac_stop_dma,
529 .complete = dw_mci_idmac_complete_dma,
530 .cleanup = dw_mci_dma_cleanup,
532 #endif /* CONFIG_MMC_DW_IDMAC */
534 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
535 struct mmc_data *data,
538 struct scatterlist *sg;
539 unsigned int i, sg_len;
541 if (!next && data->host_cookie)
542 return data->host_cookie;
545 * We don't do DMA on "complex" transfers, i.e. with
546 * non-word-aligned buffers or lengths. Also, we don't bother
547 * with all the DMA setup overhead for short transfers.
549 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
555 for_each_sg(data->sg, sg, data->sg_len, i) {
556 if (sg->offset & 3 || sg->length & 3)
560 sg_len = dma_map_sg(host->dev,
563 dw_mci_get_dma_dir(data));
568 data->host_cookie = sg_len;
573 static void dw_mci_pre_req(struct mmc_host *mmc,
574 struct mmc_request *mrq,
577 struct dw_mci_slot *slot = mmc_priv(mmc);
578 struct mmc_data *data = mrq->data;
580 if (!slot->host->use_dma || !data)
583 if (data->host_cookie) {
584 data->host_cookie = 0;
588 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
589 data->host_cookie = 0;
592 static void dw_mci_post_req(struct mmc_host *mmc,
593 struct mmc_request *mrq,
596 struct dw_mci_slot *slot = mmc_priv(mmc);
597 struct mmc_data *data = mrq->data;
599 if (!slot->host->use_dma || !data)
602 if (data->host_cookie)
603 dma_unmap_sg(slot->host->dev,
606 dw_mci_get_dma_dir(data));
607 data->host_cookie = 0;
/*
 * Pick a DMA burst size (MSIZE) and RX/TX FIFO watermarks that divide the
 * block size evenly, and program FIFOTH accordingly.  Only meaningful for
 * the internal DMA controller.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Find the largest burst that divides both depth and TX headroom */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
651 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
653 unsigned int blksz = data->blksz;
654 u32 blksz_depth, fifo_depth;
657 WARN_ON(!(data->flags & MMC_DATA_READ));
659 if (host->timing != MMC_TIMING_MMC_HS200 &&
660 host->timing != MMC_TIMING_UHS_SDR104)
663 blksz_depth = blksz / (1 << host->data_shift);
664 fifo_depth = host->fifo_depth;
666 if (blksz_depth > fifo_depth)
670 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
671 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
672 * Currently just choose blksz.
675 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
679 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
682 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
689 /* If we don't have a channel, we can't do DMA */
693 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
695 host->dma_ops->stop(host);
702 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
703 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
707 * Decide the MSIZE and RX/TX Watermark.
708 * If current block size is same with previous size,
709 * no need to update fifoth.
711 if (host->prev_blksz != data->blksz)
712 dw_mci_adjust_fifoth(host, data);
714 /* Enable the DMA interface */
715 temp = mci_readl(host, CTRL);
716 temp |= SDMMC_CTRL_DMA_ENABLE;
717 mci_writel(host, CTRL, temp);
719 /* Disable RX/TX IRQs, let DMA handle it */
720 temp = mci_readl(host, INTMASK);
721 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
722 mci_writel(host, INTMASK, temp);
724 host->dma_ops->start(host, sg_len);
729 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
733 data->error = -EINPROGRESS;
739 if (data->flags & MMC_DATA_READ) {
740 host->dir_status = DW_MCI_RECV_STATUS;
741 dw_mci_ctrl_rd_thld(host, data);
743 host->dir_status = DW_MCI_SEND_STATUS;
746 if (dw_mci_submit_data_dma(host, data)) {
747 int flags = SG_MITER_ATOMIC;
748 if (host->data->flags & MMC_DATA_READ)
749 flags |= SG_MITER_TO_SG;
751 flags |= SG_MITER_FROM_SG;
753 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
755 host->part_buf_start = 0;
756 host->part_buf_count = 0;
758 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
759 temp = mci_readl(host, INTMASK);
760 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
761 mci_writel(host, INTMASK, temp);
763 temp = mci_readl(host, CTRL);
764 temp &= ~SDMMC_CTRL_DMA_ENABLE;
765 mci_writel(host, CTRL, temp);
768 * Use the initial fifoth_val for PIO mode.
769 * If next issued data may be transfered by DMA mode,
770 * prev_blksz should be invalidated.
772 mci_writel(host, FIFOTH, host->fifoth_val);
773 host->prev_blksz = 0;
776 * Keep the current block size.
777 * It will be used to decide whether to update
778 * fifoth register next time.
780 host->prev_blksz = data->blksz;
784 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
786 struct dw_mci *host = slot->host;
787 unsigned long timeout = jiffies + msecs_to_jiffies(500);
788 unsigned int cmd_status = 0;
790 mci_writel(host, CMDARG, arg);
792 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
794 while (time_before(jiffies, timeout)) {
795 cmd_status = mci_readl(host, CMD);
796 if (!(cmd_status & SDMMC_CMD_START))
799 dev_err(&slot->mmc->class_dev,
800 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
801 cmd, arg, cmd_status);
804 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
806 struct dw_mci *host = slot->host;
807 unsigned int clock = slot->clock;
810 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
812 /* We must continue to set bit 28 in CMD until the change is complete */
813 if (host->state == STATE_WAITING_CMD11_DONE)
814 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
817 mci_writel(host, CLKENA, 0);
818 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
819 } else if (clock != host->current_speed || force_clkinit) {
820 div = host->bus_hz / clock;
821 if (host->bus_hz % clock && host->bus_hz > clock)
823 * move the + 1 after the divide to prevent
824 * over-clocking the card.
828 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
830 if ((clock << div) != slot->__clk_old || force_clkinit)
831 dev_info(&slot->mmc->class_dev,
832 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
833 slot->id, host->bus_hz, clock,
834 div ? ((host->bus_hz / div) >> 1) :
838 mci_writel(host, CLKENA, 0);
839 mci_writel(host, CLKSRC, 0);
842 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
844 /* set clock to desired speed */
845 mci_writel(host, CLKDIV, div);
848 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
850 /* enable clock; only low power if no SDIO */
851 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
852 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
853 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
854 mci_writel(host, CLKENA, clk_en_a);
857 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
859 /* keep the clock with reflecting clock dividor */
860 slot->__clk_old = clock << div;
863 host->current_speed = clock;
865 /* Set the current slot bus width */
866 mci_writel(host, CTYPE, (slot->ctype << slot->id));
869 static void __dw_mci_start_request(struct dw_mci *host,
870 struct dw_mci_slot *slot,
871 struct mmc_command *cmd)
873 struct mmc_request *mrq;
874 struct mmc_data *data;
879 host->cur_slot = slot;
882 host->pending_events = 0;
883 host->completed_events = 0;
884 host->cmd_status = 0;
885 host->data_status = 0;
886 host->dir_status = 0;
890 mci_writel(host, TMOUT, 0xFFFFFFFF);
891 mci_writel(host, BYTCNT, data->blksz*data->blocks);
892 mci_writel(host, BLKSIZ, data->blksz);
895 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
897 /* this is the first command, send the initialization clock */
898 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
899 cmdflags |= SDMMC_CMD_INIT;
902 dw_mci_submit_data(host, data);
906 dw_mci_start_command(host, cmd, cmdflags);
909 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
911 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
914 static void dw_mci_start_request(struct dw_mci *host,
915 struct dw_mci_slot *slot)
917 struct mmc_request *mrq = slot->mrq;
918 struct mmc_command *cmd;
920 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
921 __dw_mci_start_request(host, slot, cmd);
924 /* must be called with host->lock held */
925 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
926 struct mmc_request *mrq)
928 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
933 if (host->state == STATE_WAITING_CMD11_DONE) {
934 dev_warn(&slot->mmc->class_dev,
935 "Voltage change didn't complete\n");
937 * this case isn't expected to happen, so we can
938 * either crash here or just try to continue on
939 * in the closest possible state
941 host->state = STATE_IDLE;
944 if (host->state == STATE_IDLE) {
945 host->state = STATE_SENDING_CMD;
946 dw_mci_start_request(host, slot);
948 list_add_tail(&slot->queue_node, &host->queue);
952 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
954 struct dw_mci_slot *slot = mmc_priv(mmc);
955 struct dw_mci *host = slot->host;
960 * The check for card presence and queueing of the request must be
961 * atomic, otherwise the card could be removed in between and the
962 * request wouldn't fail until another card was inserted.
964 spin_lock_bh(&host->lock);
966 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
967 spin_unlock_bh(&host->lock);
968 mrq->cmd->error = -ENOMEDIUM;
969 mmc_request_done(mmc, mrq);
973 dw_mci_queue_request(host, slot, mrq);
975 spin_unlock_bh(&host->lock);
978 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
980 struct dw_mci_slot *slot = mmc_priv(mmc);
981 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
985 switch (ios->bus_width) {
986 case MMC_BUS_WIDTH_4:
987 slot->ctype = SDMMC_CTYPE_4BIT;
989 case MMC_BUS_WIDTH_8:
990 slot->ctype = SDMMC_CTYPE_8BIT;
993 /* set default 1 bit mode */
994 slot->ctype = SDMMC_CTYPE_1BIT;
997 regs = mci_readl(slot->host, UHS_REG);
1000 if (ios->timing == MMC_TIMING_MMC_DDR52)
1001 regs |= ((0x1 << slot->id) << 16);
1003 regs &= ~((0x1 << slot->id) << 16);
1005 mci_writel(slot->host, UHS_REG, regs);
1006 slot->host->timing = ios->timing;
1009 * Use mirror of ios->clock to prevent race with mmc
1010 * core ios update when finding the minimum.
1012 slot->clock = ios->clock;
1014 if (drv_data && drv_data->set_ios)
1015 drv_data->set_ios(slot->host, ios);
1017 /* Slot specific timing and width adjustment */
1018 dw_mci_setup_bus(slot, false);
1020 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1021 slot->host->state = STATE_IDLE;
1023 switch (ios->power_mode) {
1025 if (!IS_ERR(mmc->supply.vmmc)) {
1026 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1029 dev_err(slot->host->dev,
1030 "failed to enable vmmc regulator\n");
1031 /*return, if failed turn on vmmc*/
1035 if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
1036 ret = regulator_enable(mmc->supply.vqmmc);
1038 dev_err(slot->host->dev,
1039 "failed to enable vqmmc regulator\n");
1041 slot->host->vqmmc_enabled = true;
1043 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1044 regs = mci_readl(slot->host, PWREN);
1045 regs |= (1 << slot->id);
1046 mci_writel(slot->host, PWREN, regs);
1049 if (!IS_ERR(mmc->supply.vmmc))
1050 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1052 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
1053 regulator_disable(mmc->supply.vqmmc);
1054 slot->host->vqmmc_enabled = false;
1057 regs = mci_readl(slot->host, PWREN);
1058 regs &= ~(1 << slot->id);
1059 mci_writel(slot->host, PWREN, regs);
1066 static int dw_mci_card_busy(struct mmc_host *mmc)
1068 struct dw_mci_slot *slot = mmc_priv(mmc);
1072 * Check the busy bit which is low when DAT[3:0]
1073 * (the data lines) are 0000
1075 status = mci_readl(slot->host, STATUS);
1077 return !!(status & SDMMC_STATUS_BUSY);
1080 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1082 struct dw_mci_slot *slot = mmc_priv(mmc);
1083 struct dw_mci *host = slot->host;
1085 u32 v18 = SDMMC_UHS_18V << slot->id;
1090 * Program the voltage. Note that some instances of dw_mmc may use
1091 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1092 * does no harm but you need to set the regulator directly. Try both.
1094 uhs = mci_readl(host, UHS_REG);
1095 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1104 if (!IS_ERR(mmc->supply.vqmmc)) {
1105 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
1108 dev_err(&mmc->class_dev,
1109 "Regulator set error %d: %d - %d\n",
1110 ret, min_uv, max_uv);
1114 mci_writel(host, UHS_REG, uhs);
1119 static int dw_mci_get_ro(struct mmc_host *mmc)
1122 struct dw_mci_slot *slot = mmc_priv(mmc);
1123 int gpio_ro = mmc_gpio_get_ro(mmc);
1125 /* Use platform get_ro function, else try on board write protect */
1126 if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1127 (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1129 else if (!IS_ERR_VALUE(gpio_ro))
1130 read_only = gpio_ro;
1133 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1135 dev_dbg(&mmc->class_dev, "card is %s\n",
1136 read_only ? "read-only" : "read-write");
1141 static int dw_mci_get_cd(struct mmc_host *mmc)
1144 struct dw_mci_slot *slot = mmc_priv(mmc);
1145 struct dw_mci_board *brd = slot->host->pdata;
1146 struct dw_mci *host = slot->host;
1147 int gpio_cd = mmc_gpio_get_cd(mmc);
1149 /* Use platform get_cd function, else try onboard card detect */
1150 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1152 else if (!IS_ERR_VALUE(gpio_cd))
1155 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1158 spin_lock_bh(&host->lock);
1160 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1161 dev_dbg(&mmc->class_dev, "card is present\n");
1163 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1164 dev_dbg(&mmc->class_dev, "card is not present\n");
1166 spin_unlock_bh(&host->lock);
1172 * Disable lower power mode.
1174 * Low power mode will stop the card clock when idle. According to the
1175 * description of the CLKENA register we should disable low power mode
1176 * for SDIO cards if we need SDIO interrupts to work.
1178 * This function is fast if low power mode is already disabled.
1180 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1182 struct dw_mci *host = slot->host;
1184 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1186 clk_en_a = mci_readl(host, CLKENA);
1188 if (clk_en_a & clken_low_pwr) {
1189 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1190 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1191 SDMMC_CMD_PRV_DAT_WAIT, 0);
1195 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1197 struct dw_mci_slot *slot = mmc_priv(mmc);
1198 struct dw_mci *host = slot->host;
1201 /* Enable/disable Slot Specific SDIO interrupt */
1202 int_mask = mci_readl(host, INTMASK);
1205 * Turn off low power mode if it was enabled. This is a bit of
1206 * a heavy operation and we disable / enable IRQs a lot, so
1207 * we'll leave low power mode disabled and it will get
1208 * re-enabled again in dw_mci_setup_bus().
1210 dw_mci_disable_low_power(slot);
1212 mci_writel(host, INTMASK,
1213 (int_mask | SDMMC_INT_SDIO(slot->id)));
1215 mci_writel(host, INTMASK,
1216 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1220 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1222 struct dw_mci_slot *slot = mmc_priv(mmc);
1223 struct dw_mci *host = slot->host;
1224 const struct dw_mci_drv_data *drv_data = host->drv_data;
1225 struct dw_mci_tuning_data tuning_data;
1228 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1229 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1230 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1231 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1232 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1233 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1234 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1238 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1239 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1240 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1243 "Undefined command(%d) for tuning\n", opcode);
1247 if (drv_data && drv_data->execute_tuning)
1248 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1252 static const struct mmc_host_ops dw_mci_ops = {
1253 .request = dw_mci_request,
1254 .pre_req = dw_mci_pre_req,
1255 .post_req = dw_mci_post_req,
1256 .set_ios = dw_mci_set_ios,
1257 .get_ro = dw_mci_get_ro,
1258 .get_cd = dw_mci_get_cd,
1259 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1260 .execute_tuning = dw_mci_execute_tuning,
1261 .card_busy = dw_mci_card_busy,
1262 .start_signal_voltage_switch = dw_mci_switch_voltage,
1266 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1267 __releases(&host->lock)
1268 __acquires(&host->lock)
1270 struct dw_mci_slot *slot;
1271 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1273 WARN_ON(host->cmd || host->data);
1275 host->cur_slot->mrq = NULL;
1277 if (!list_empty(&host->queue)) {
1278 slot = list_entry(host->queue.next,
1279 struct dw_mci_slot, queue_node);
1280 list_del(&slot->queue_node);
1281 dev_vdbg(host->dev, "list not empty: %s is next\n",
1282 mmc_hostname(slot->mmc));
1283 host->state = STATE_SENDING_CMD;
1284 dw_mci_start_request(host, slot);
1286 dev_vdbg(host->dev, "list empty\n");
1288 if (host->state == STATE_SENDING_CMD11)
1289 host->state = STATE_WAITING_CMD11_DONE;
1291 host->state = STATE_IDLE;
1294 spin_unlock(&host->lock);
1295 mmc_request_done(prev_mmc, mrq);
1296 spin_lock(&host->lock);
/*
 * Harvest the response registers and latched interrupt status for a
 * completed command, translating hardware error bits into errno codes
 * in cmd->error.  Returns the resulting error value (0 on success) —
 * the return path is in lines elided from this extract.
 */
1299 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1301 u32 status = host->cmd_status;
/* Consume the status snapshot recorded by the IRQ handler. */
1303 host->cmd_status = 0;
1305 /* Read the response from the card (up to 16 bytes) */
1306 if (cmd->flags & MMC_RSP_PRESENT) {
1307 if (cmd->flags & MMC_RSP_136) {
/*
 * 136-bit responses: RESP0 holds the least significant word, so
 * fill resp[] most-significant-first as the core expects.
 */
1308 cmd->resp[3] = mci_readl(host, RESP0);
1309 cmd->resp[2] = mci_readl(host, RESP1);
1310 cmd->resp[1] = mci_readl(host, RESP2);
1311 cmd->resp[0] = mci_readl(host, RESP3);
1313 cmd->resp[0] = mci_readl(host, RESP0);
/* Map controller error bits to errno. */
1320 if (status & SDMMC_INT_RTO)
1321 cmd->error = -ETIMEDOUT;
1322 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1323 cmd->error = -EILSEQ;
1324 else if (status & SDMMC_INT_RESP_ERR)
1330 /* newer ip versions need a delay between retries */
1331 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Evaluate a finished data transfer: translate latched data-error
 * interrupt bits into data->error, or account the full transfer length
 * on success.  Returns the error value (return statements are in lines
 * elided from this extract).
 */
1338 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1340 u32 status = host->data_status;
1342 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1343 if (status & SDMMC_INT_DRTO) {
1344 data->error = -ETIMEDOUT;
1345 } else if (status & SDMMC_INT_DCRC) {
1346 data->error = -EILSEQ;
1347 } else if (status & SDMMC_INT_EBE) {
/* End-bit error: meaning depends on the transfer direction. */
1348 if (host->dir_status ==
1349 DW_MCI_SEND_STATUS) {
1351 * No data CRC status was returned.
1352 * The number of bytes transferred
1353 * will be exaggerated in PIO mode.
1355 data->bytes_xfered = 0;
1356 data->error = -ETIMEDOUT;
1357 } else if (host->dir_status ==
1358 DW_MCI_RECV_STATUS) {
1362 /* SDMMC_INT_SBE is included */
1366 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1369 * After an error, there may be data lingering
/* No error: report the whole request as transferred. */
1374 data->bytes_xfered = data->blocks * data->blksz;
/*
 * Bottom-half state machine driven by events latched in
 * host->pending_events by the IRQ handler.  Walks the request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, handling
 * errors by aborting DMA and issuing stop/abort commands.  The loop
 * repeats while the state keeps advancing (state != prev_state).
 *
 * NOTE(review): several lines (brace/default/break statements, data =
 * host->data assignments) are elided from this extract; the ordering of
 * test_and_clear_bit() checks is deliberate and must not be reshuffled.
 */
1381 static void dw_mci_tasklet_func(unsigned long priv)
1383 struct dw_mci *host = (struct dw_mci *)priv;
1384 struct mmc_data *data;
1385 struct mmc_command *cmd;
1386 struct mmc_request *mrq;
1387 enum dw_mci_state state;
1388 enum dw_mci_state prev_state;
1391 spin_lock(&host->lock);
1393 state = host->state;
1402 case STATE_WAITING_CMD11_DONE:
1405 case STATE_SENDING_CMD11:
1406 case STATE_SENDING_CMD:
1407 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1408 &host->pending_events))
1413 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1414 err = dw_mci_command_complete(host, cmd);
/* A successful SET_BLOCK_COUNT (sbc) chains straight into the data cmd. */
1415 if (cmd == mrq->sbc && !err) {
1416 prev_state = state = STATE_SENDING_CMD;
1417 __dw_mci_start_request(host, host->cur_slot,
/* Command error with data pending: abort DMA and send stop. */
1422 if (cmd->data && err) {
1423 dw_mci_stop_dma(host);
1424 send_stop_abort(host, data);
1425 state = STATE_SENDING_STOP;
1429 if (!cmd->data || err) {
1430 dw_mci_request_end(host, mrq);
1434 prev_state = state = STATE_SENDING_DATA;
1437 case STATE_SENDING_DATA:
1439 * We could get a data error and never a transfer
1440 * complete so we'd better check for it here.
1442 * Note that we don't really care if we also got a
1443 * transfer complete; stopping the DMA and sending an
1446 if (test_and_clear_bit(EVENT_DATA_ERROR,
1447 &host->pending_events)) {
1448 dw_mci_stop_dma(host);
1449 send_stop_abort(host, data);
1450 state = STATE_DATA_ERROR;
1454 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1455 &host->pending_events))
1458 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1461 * Handle an EVENT_DATA_ERROR that might have shown up
1462 * before the transfer completed. This might not have
1463 * been caught by the check above because the interrupt
1464 * could have gone off between the previous check and
1465 * the check for transfer complete.
1467 * Technically this ought not be needed assuming we
1468 * get a DATA_COMPLETE eventually (we'll notice the
1469 * error and end the request), but it shouldn't hurt.
1471 * This has the advantage of sending the stop command.
1473 if (test_and_clear_bit(EVENT_DATA_ERROR,
1474 &host->pending_events)) {
1475 dw_mci_stop_dma(host);
1476 send_stop_abort(host, data);
1477 state = STATE_DATA_ERROR;
1480 prev_state = state = STATE_DATA_BUSY;
1484 case STATE_DATA_BUSY:
1485 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1486 &host->pending_events))
1490 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1491 err = dw_mci_data_complete(host, data);
/* Pre-defined transfers (sbc) or stop-less requests end here. */
1494 if (!data->stop || mrq->sbc) {
1495 if (mrq->sbc && data->stop)
1496 data->stop->error = 0;
1497 dw_mci_request_end(host, mrq);
1501 /* stop command for open-ended transfer*/
1503 send_stop_abort(host, data);
1506 * If we don't have a command complete now we'll
1507 * never get one since we just reset everything;
1508 * better end the request.
1510 * If we do have a command complete we'll fall
1511 * through to the SENDING_STOP command and
1512 * everything will be peachy keen.
1514 if (!test_bit(EVENT_CMD_COMPLETE,
1515 &host->pending_events)) {
1517 dw_mci_request_end(host, mrq);
1523 * If err has non-zero,
1524 * stop-abort command has been already issued.
1526 prev_state = state = STATE_SENDING_STOP;
1530 case STATE_SENDING_STOP:
1531 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1532 &host->pending_events))
1535 /* CMD error in data command */
1536 if (mrq->cmd->error && mrq->data)
1543 dw_mci_command_complete(host, mrq->stop);
1545 host->cmd_status = 0;
1547 dw_mci_request_end(host, mrq);
1550 case STATE_DATA_ERROR:
1551 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1552 &host->pending_events))
1555 state = STATE_DATA_BUSY;
1558 } while (state != prev_state);
1560 host->state = state;
1562 spin_unlock(&host->lock);
1566 /* push final bytes to part_buf, only use during push */
/* Stash @cnt trailing bytes that don't fill a full FIFO word. */
1567 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1569 memcpy((void *)&host->part_buf, buf, cnt);
1570 host->part_buf_count = cnt;
1573 /* append bytes to part_buf, only use during push */
/*
 * Append up to @cnt bytes to part_buf, capped so the buffer never
 * exceeds one FIFO word (1 << data_shift bytes).  Returns the number
 * of bytes actually consumed (return is in an elided line).
 */
1574 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1576 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1577 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1578 host->part_buf_count += cnt;
1582 /* pull first bytes from part_buf, only use during pull */
/*
 * Copy up to @cnt buffered bytes out of part_buf into @buf, advancing
 * part_buf_start/part_buf_count.  Returns how many bytes were supplied
 * (return is in an elided line).
 */
1583 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1585 cnt = min(cnt, (int)host->part_buf_count);
1587 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1589 host->part_buf_count -= cnt;
1590 host->part_buf_start += cnt;
1595 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Hand @cnt bytes from a freshly-read FIFO word to the caller and mark
 * the remainder of the word as pending in part_buf for the next pull.
 */
1596 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1598 memcpy(buf, &host->part_buf, cnt);
1599 host->part_buf_start = cnt;
1600 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit-wide data FIFO: drains any partial word left
 * in part_buf, writes whole 16-bit words, and buffers a trailing odd
 * byte — flushing it immediately if it completes the expected transfer
 * length.  On platforms without efficient unaligned access, misaligned
 * source buffers are staged through an aligned bounce buffer.
 */
1603 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1605 struct mmc_data *data = host->data;
1608 /* try and push anything in the part_buf */
1609 if (unlikely(host->part_buf_count)) {
1610 int len = dw_mci_push_part_bytes(host, buf, cnt);
1613 if (host->part_buf_count == 2) {
1614 mci_writew(host, DATA(host->data_offset),
1616 host->part_buf_count = 0;
1619 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1620 if (unlikely((unsigned long)buf & 0x1)) {
1622 u16 aligned_buf[64];
/* Round down to an even byte count that fits the bounce buffer. */
1623 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1624 int items = len >> 1;
1626 /* memcpy from input buffer into aligned buffer */
1627 memcpy(aligned_buf, buf, len);
1630 /* push data from aligned buffer into fifo */
1631 for (i = 0; i < items; ++i)
1632 mci_writew(host, DATA(host->data_offset),
1639 for (; cnt >= 2; cnt -= 2)
1640 mci_writew(host, DATA(host->data_offset), *pdata++);
1643 /* put anything remaining in the part_buf */
1645 dw_mci_set_part_bytes(host, buf, cnt);
1646 /* Push data if we have reached the expected data length */
1647 if ((data->bytes_xfered + init_cnt) ==
1648 (data->blksz * data->blocks))
1649 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit-wide data FIFO: reads whole 16-bit words into
 * @buf (via an aligned bounce buffer when @buf is misaligned and the
 * platform lacks efficient unaligned access), then reads one final
 * word into part_buf16 and hands the odd trailing byte to the caller.
 */
1656 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1656 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1657 if (unlikely((unsigned long)buf & 0x1)) {
1659 /* pull data from fifo into aligned buffer */
1660 u16 aligned_buf[64];
1661 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1662 int items = len >> 1;
1664 for (i = 0; i < items; ++i)
1665 aligned_buf[i] = mci_readw(host,
1666 DATA(host->data_offset));
1667 /* memcpy from aligned buffer into output buffer */
1668 memcpy(buf, aligned_buf, len);
1676 for (; cnt >= 2; cnt -= 2)
1677 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Trailing odd byte: buffer a full word, deliver cnt bytes of it. */
1681 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1682 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit-wide data FIFO.  Same structure as
 * dw_mci_push_data16 with a 4-byte word size: drain part_buf, write
 * whole words (bouncing misaligned buffers), buffer the 1-3 trailing
 * bytes, and flush them if they complete the expected transfer length.
 */
1686 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1688 struct mmc_data *data = host->data;
1691 /* try and push anything in the part_buf */
1692 if (unlikely(host->part_buf_count)) {
1693 int len = dw_mci_push_part_bytes(host, buf, cnt);
1696 if (host->part_buf_count == 4) {
1697 mci_writel(host, DATA(host->data_offset),
1699 host->part_buf_count = 0;
1702 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1703 if (unlikely((unsigned long)buf & 0x3)) {
1705 u32 aligned_buf[32];
/* Round down to a multiple of 4 that fits the bounce buffer. */
1706 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1707 int items = len >> 2;
1709 /* memcpy from input buffer into aligned buffer */
1710 memcpy(aligned_buf, buf, len);
1713 /* push data from aligned buffer into fifo */
1714 for (i = 0; i < items; ++i)
1715 mci_writel(host, DATA(host->data_offset),
1722 for (; cnt >= 4; cnt -= 4)
1723 mci_writel(host, DATA(host->data_offset), *pdata++);
1726 /* put anything remaining in the part_buf */
1728 dw_mci_set_part_bytes(host, buf, cnt);
1729 /* Push data if we have reached the expected data length */
1730 if ((data->bytes_xfered + init_cnt) ==
1731 (data->blksz * data->blocks))
1732 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit-wide data FIFO.  Mirrors dw_mci_pull_data16
 * with a 4-byte word size; the final partial word is parked in
 * part_buf32 and its leading bytes delivered via
 * dw_mci_pull_final_bytes().
 */
1737 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1739 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1740 if (unlikely((unsigned long)buf & 0x3)) {
1742 /* pull data from fifo into aligned buffer */
1743 u32 aligned_buf[32];
1744 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1745 int items = len >> 2;
1747 for (i = 0; i < items; ++i)
1748 aligned_buf[i] = mci_readl(host,
1749 DATA(host->data_offset));
1750 /* memcpy from aligned buffer into output buffer */
1751 memcpy(buf, aligned_buf, len);
1759 for (; cnt >= 4; cnt -= 4)
1760 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing 1-3 bytes: buffer a full word, deliver cnt bytes of it. */
1764 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1765 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit-wide data FIFO.  Same structure as the 16/32
 * bit variants with an 8-byte word size: drain part_buf, write whole
 * words (bouncing misaligned buffers), buffer the 1-7 trailing bytes,
 * flushing them if they complete the expected transfer length.
 */
1769 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1771 struct mmc_data *data = host->data;
1774 /* try and push anything in the part_buf */
1775 if (unlikely(host->part_buf_count)) {
1776 int len = dw_mci_push_part_bytes(host, buf, cnt);
1780 if (host->part_buf_count == 8) {
1781 mci_writeq(host, DATA(host->data_offset),
1783 host->part_buf_count = 0;
1786 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1787 if (unlikely((unsigned long)buf & 0x7)) {
1789 u64 aligned_buf[16];
/* Round down to a multiple of 8 that fits the bounce buffer. */
1790 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1791 int items = len >> 3;
1793 /* memcpy from input buffer into aligned buffer */
1794 memcpy(aligned_buf, buf, len);
1797 /* push data from aligned buffer into fifo */
1798 for (i = 0; i < items; ++i)
1799 mci_writeq(host, DATA(host->data_offset),
1806 for (; cnt >= 8; cnt -= 8)
1807 mci_writeq(host, DATA(host->data_offset), *pdata++);
1810 /* put anything remaining in the part_buf */
1812 dw_mci_set_part_bytes(host, buf, cnt);
1813 /* Push data if we have reached the expected data length */
1814 if ((data->bytes_xfered + init_cnt) ==
1815 (data->blksz * data->blocks))
1816 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit-wide data FIFO.  Mirrors the 16/32-bit pull
 * helpers with an 8-byte word size; the final partial word lands in
 * host->part_buf and its leading bytes are delivered via
 * dw_mci_pull_final_bytes().
 */
1821 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1823 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1824 if (unlikely((unsigned long)buf & 0x7)) {
1826 /* pull data from fifo into aligned buffer */
1827 u64 aligned_buf[16];
1828 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1829 int items = len >> 3;
1831 for (i = 0; i < items; ++i)
1832 aligned_buf[i] = mci_readq(host,
1833 DATA(host->data_offset));
1834 /* memcpy from aligned buffer into output buffer */
1835 memcpy(buf, aligned_buf, len);
1843 for (; cnt >= 8; cnt -= 8)
1844 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing 1-7 bytes: buffer a full word, deliver cnt bytes of it. */
1848 host->part_buf = mci_readq(host, DATA(host->data_offset));
1849 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Front-end for PIO reads: first satisfy the request from any bytes
 * left over in part_buf, then delegate the remainder to the
 * width-specific pull handler installed in host->pull_data.
 */
1853 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1857 /* get remaining partial bytes */
1858 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* Entire request satisfied from part_buf — nothing to read from FIFO. */
1859 if (unlikely(len == cnt))
1864 /* get the rest of the data */
1865 host->pull_data(host, buf, cnt);
/*
 * PIO receive path, called from the IRQ handler on RXDR (and with
 * @dto=true on DATA_OVER to drain any residue left in the FIFO).
 * Iterates the request's scatterlist with sg_miter, pulling as many
 * bytes per pass as the FIFO count (plus buffered partial bytes)
 * allows, and re-polls while RXDR stays asserted.  On scatterlist
 * exhaustion it flags EVENT_XFER_COMPLETE for the tasklet.
 */
1868 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1870 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1872 unsigned int offset;
1873 struct mmc_data *data = host->data;
1874 int shift = host->data_shift;
1877 unsigned int remain, fcnt;
1880 if (!sg_miter_next(sg_miter))
1883 host->sg = sg_miter->piter.sg;
1884 buf = sg_miter->addr;
1885 remain = sg_miter->length;
/* Bytes available = FIFO word count scaled to bytes + partial bytes. */
1889 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1890 << shift) + host->part_buf_count;
1891 len = min(remain, fcnt);
1894 dw_mci_pull_data(host, (void *)(buf + offset), len);
1895 data->bytes_xfered += len;
1900 sg_miter->consumed = offset;
1901 status = mci_readl(host, MINTSTS);
1902 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1903 /* if the RXDR is ready read again */
1904 } while ((status & SDMMC_INT_RXDR) ||
1905 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1908 if (!sg_miter_next(sg_miter))
1910 sg_miter->consumed = 0;
1912 sg_miter_stop(sg_miter);
1916 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: let the tasklet finish the transfer. */
1919 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path, called from the IRQ handler on TXDR.  Mirrors
 * dw_mci_read_data_pio: walks the scatterlist with sg_miter, pushing
 * as many bytes as the free FIFO space (minus buffered partial bytes)
 * allows per pass, re-polling while TXDR stays asserted, and flagging
 * EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 */
1922 static void dw_mci_write_data_pio(struct dw_mci *host)
1924 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1926 unsigned int offset;
1927 struct mmc_data *data = host->data;
1928 int shift = host->data_shift;
1931 unsigned int fifo_depth = host->fifo_depth;
1932 unsigned int remain, fcnt;
1935 if (!sg_miter_next(sg_miter))
1938 host->sg = sg_miter->piter.sg;
1939 buf = sg_miter->addr;
1940 remain = sg_miter->length;
/* Free space = (depth - used words) in bytes, minus partial bytes. */
1944 fcnt = ((fifo_depth -
1945 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1946 << shift) - host->part_buf_count;
1947 len = min(remain, fcnt);
1950 host->push_data(host, (void *)(buf + offset), len);
1951 data->bytes_xfered += len;
1956 sg_miter->consumed = offset;
1957 status = mci_readl(host, MINTSTS);
1958 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1959 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1962 if (!sg_miter_next(sg_miter))
1964 sg_miter->consumed = 0;
1966 sg_miter_stop(sg_miter);
1970 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: let the tasklet finish the transfer. */
1973 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record command-done status from the IRQ handler (without clobbering
 * an earlier error snapshot) and wake the tasklet to advance the
 * request state machine.
 */
1976 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1978 if (!host->cmd_status)
1979 host->cmd_status = status;
1983 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1984 tasklet_schedule(&host->tasklet);
/*
 * Top-half interrupt handler.  Reads the masked interrupt status,
 * acknowledges each source in RINTSTS, records state for the tasklet
 * (command/data status, error/complete events) and services PIO
 * RXDR/TXDR, card-detect, per-slot SDIO IRQs and — when the internal
 * DMAC is compiled in — IDMAC completion interrupts.
 *
 * NOTE(review): the ordering here (volt-switch check before the error
 * checks, error checks before DATA_OVER) is deliberate; do not reorder.
 */
1987 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1989 struct dw_mci *host = dev_id;
1993 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1996 * DTO fix - version 2.10a and below, and only if internal DMA
/* Quirk: synthesize DATA_OVER when old IP fails to raise it. */
1999 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2001 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2002 pending |= SDMMC_INT_DATA_OVER;
2006 /* Check volt switch first, since it can look like an error */
2007 if ((host->state == STATE_SENDING_CMD11) &&
2008 (pending & SDMMC_INT_VOLT_SWITCH)) {
2009 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2010 pending &= ~SDMMC_INT_VOLT_SWITCH;
2011 dw_mci_cmd_interrupt(host, pending);
2014 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2015 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2016 host->cmd_status = pending;
2018 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2021 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2022 /* if there is an error report DATA_ERROR */
2023 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2024 host->data_status = pending;
2026 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2027 tasklet_schedule(&host->tasklet);
2030 if (pending & SDMMC_INT_DATA_OVER) {
2031 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
/* Keep an earlier error status; only record success if none. */
2032 if (!host->data_status)
2033 host->data_status = pending;
2035 if (host->dir_status == DW_MCI_RECV_STATUS) {
2036 if (host->sg != NULL)
2037 dw_mci_read_data_pio(host, true);
2039 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2040 tasklet_schedule(&host->tasklet);
2043 if (pending & SDMMC_INT_RXDR) {
2044 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2045 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2046 dw_mci_read_data_pio(host, false);
2049 if (pending & SDMMC_INT_TXDR) {
2050 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2051 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2052 dw_mci_write_data_pio(host);
2055 if (pending & SDMMC_INT_CMD_DONE) {
2056 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2057 dw_mci_cmd_interrupt(host, pending);
2060 if (pending & SDMMC_INT_CD) {
2061 mci_writel(host, RINTSTS, SDMMC_INT_CD);
/* Card insert/remove is heavyweight — defer to the workqueue. */
2062 queue_work(host->card_workqueue, &host->card_work);
2065 /* Handle SDIO Interrupts */
2066 for (i = 0; i < host->num_slots; i++) {
2067 struct dw_mci_slot *slot = host->slot[i];
2068 if (pending & SDMMC_INT_SDIO(i)) {
2069 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
2070 mmc_signal_sdio_irq(slot->mmc);
2076 #ifdef CONFIG_MMC_DW_IDMAC
2077 /* Handle DMA interrupts */
2078 pending = mci_readl(host, IDSTS);
2079 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2080 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2081 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2082 host->dma_ops->complete(host);
/*
 * Deferred card-detect handler.  For each slot, loops while the
 * detected presence differs from the last recorded state: under the
 * host lock it fails any in-flight or queued request with -ENOMEDIUM
 * (aborting DMA as needed per state-machine state), then notifies the
 * MMC core via mmc_detect_change() after the configured debounce.
 */
2089 static void dw_mci_work_routine_card(struct work_struct *work)
2091 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2094 for (i = 0; i < host->num_slots; i++) {
2095 struct dw_mci_slot *slot = host->slot[i];
2096 struct mmc_host *mmc = slot->mmc;
2097 struct mmc_request *mrq;
2100 present = dw_mci_get_cd(mmc);
/* Re-check after each pass in case the card bounced meanwhile. */
2101 while (present != slot->last_detect_state) {
2102 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2103 present ? "inserted" : "removed");
2105 spin_lock_bh(&host->lock);
2107 /* Card change detected */
2108 slot->last_detect_state = present;
2110 /* Clean up queue if present */
2113 if (mrq == host->mrq) {
/* Request currently being executed: unwind per current state. */
2117 switch (host->state) {
2119 case STATE_WAITING_CMD11_DONE:
2121 case STATE_SENDING_CMD11:
2122 case STATE_SENDING_CMD:
2123 mrq->cmd->error = -ENOMEDIUM;
2127 case STATE_SENDING_DATA:
2128 mrq->data->error = -ENOMEDIUM;
2129 dw_mci_stop_dma(host);
2131 case STATE_DATA_BUSY:
2132 case STATE_DATA_ERROR:
2133 if (mrq->data->error == -EINPROGRESS)
2134 mrq->data->error = -ENOMEDIUM;
2136 case STATE_SENDING_STOP:
2138 mrq->stop->error = -ENOMEDIUM;
2142 dw_mci_request_end(host, mrq);
/* Request still queued: just fail it and complete directly. */
2144 list_del(&slot->queue_node);
2145 mrq->cmd->error = -ENOMEDIUM;
2147 mrq->data->error = -ENOMEDIUM;
2149 mrq->stop->error = -ENOMEDIUM;
2151 spin_unlock(&host->lock);
2152 mmc_request_done(slot->mmc, mrq);
2153 spin_lock(&host->lock);
2157 /* Power down slot */
2161 spin_unlock_bh(&host->lock);
2163 present = dw_mci_get_cd(mmc);
2166 mmc_detect_change(slot->mmc,
2167 msecs_to_jiffies(host->pdata->detect_delay_ms));
2172 /* given a slot id, find out the device node representing that slot */
/*
 * Scan the controller's DT children for one whose "reg" property
 * matches @slot; returns that node, or NULL (returns are in lines
 * elided from this extract).
 */
2173 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2175 struct device_node *np;
2179 if (!dev || !dev->of_node)
2182 for_each_child_of_node(dev->of_node, np) {
2183 addr = of_get_property(np, "reg", &len);
2184 if (!addr || (len < sizeof(int)))
2186 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT properties to DW_MCI_SLOT_QUIRK_* flags. */
2192 static struct dw_mci_of_slot_quirks {
2195 } of_slot_quirks[] = {
2197 .quirk = "disable-wp",
2198 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk bitmask for @slot from its DT node, warning that
 * each matched property is deprecated.
 */
2202 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2204 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2209 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2210 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2211 dev_warn(dev, "Slot quirk %s is deprecated\n",
2212 of_slot_quirks[idx].quirk);
2213 quirks |= of_slot_quirks[idx].id;
2218 #else /* CONFIG_OF */
/* !CONFIG_OF stubs: no DT, so no slot quirks and no slot node. */
2219 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2223 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2227 #endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot @id: apply DT/platform
 * frequency limits, caps and quirks, regulator lookup, block-layer
 * size limits (IDMAC vs PIO), initial card-detect state, then
 * mmc_add_host().  Returns 0 on success or a negative errno (returns
 * and error-unwind labels are in lines elided from this extract).
 */
2229 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2231 struct mmc_host *mmc;
2232 struct dw_mci_slot *slot;
2233 const struct dw_mci_drv_data *drv_data = host->drv_data;
2237 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2241 slot = mmc_priv(mmc);
2245 host->slot[id] = slot;
2247 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2249 mmc->ops = &dw_mci_ops;
/* DT may override the default min/max bus frequencies. */
2250 if (of_property_read_u32_array(host->dev->of_node,
2251 "clock-freq-min-max", freq, 2)) {
2252 mmc->f_min = DW_MCI_FREQ_MIN;
2253 mmc->f_max = DW_MCI_FREQ_MAX;
2255 mmc->f_min = freq[0];
2256 mmc->f_max = freq[1];
2259 /*if there are external regulators, get them*/
2260 ret = mmc_regulator_get_supply(mmc);
2261 if (ret == -EPROBE_DEFER)
/* No regulator-provided OCR mask: assume a 3.3V-capable slot. */
2264 if (!mmc->ocr_avail)
2265 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2267 if (host->pdata->caps)
2268 mmc->caps = host->pdata->caps;
2270 if (host->pdata->pm_caps)
2271 mmc->pm_caps = host->pdata->pm_caps;
/* Pick the controller index: DT "mshc" alias or platform device id. */
2273 if (host->dev->of_node) {
2274 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2278 ctrl_id = to_platform_device(host->dev)->id;
2280 if (drv_data && drv_data->caps)
2281 mmc->caps |= drv_data->caps[ctrl_id];
2283 if (host->pdata->caps2)
2284 mmc->caps2 = host->pdata->caps2;
2288 if (host->pdata->blk_settings) {
2289 mmc->max_segs = host->pdata->blk_settings->max_segs;
2290 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2291 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2292 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2293 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2295 /* Useful defaults if platform data is unset. */
2296 #ifdef CONFIG_MMC_DW_IDMAC
2297 mmc->max_segs = host->ring_size;
2298 mmc->max_blk_size = 65536;
2299 mmc->max_blk_count = host->ring_size;
2300 mmc->max_seg_size = 0x1000;
2301 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2304 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2305 mmc->max_blk_count = 512;
2306 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2307 mmc->max_seg_size = mmc->max_req_size;
2308 #endif /* CONFIG_MMC_DW_IDMAC */
2311 if (dw_mci_get_cd(mmc))
2312 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2314 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2316 ret = mmc_add_host(mmc);
2320 #if defined(CONFIG_DEBUG_FS)
2321 dw_mci_init_debugfs(slot);
2324 /* Card initially undetected */
2325 slot->last_detect_state = 0;
/* Unregister and free the mmc_host for slot @id (inverse of init_slot). */
2334 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2336 /* Debugfs stuff is cleaned up by mmc core */
2337 mmc_remove_host(slot->mmc);
2338 slot->host->slot[id] = NULL;
2339 mmc_free_host(slot->mmc);
/*
 * Probe-time DMA setup: allocate the coherent scatter/gather
 * translation page, select the DMA backend (internal IDMAC when
 * compiled in) and initialize it; on any failure fall back to PIO
 * mode (fall-through labels are in lines elided from this extract).
 */
2342 static void dw_mci_init_dma(struct dw_mci *host)
2344 /* Alloc memory for sg translation */
2345 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2346 &host->sg_dma, GFP_KERNEL);
2347 if (!host->sg_cpu) {
2348 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2353 /* Determine which DMA interface to use */
2354 #ifdef CONFIG_MMC_DW_IDMAC
2355 host->dma_ops = &dw_mci_idmac_ops;
2356 dev_info(host->dev, "Using internal DMA controller.\n");
/* A usable backend must provide init/start/stop/cleanup. */
2362 if (host->dma_ops->init && host->dma_ops->start &&
2363 host->dma_ops->stop && host->dma_ops->cleanup) {
2364 if (host->dma_ops->init(host)) {
2365 dev_err(host->dev, "%s: Unable to initialize "
2366 "DMA Controller.\n", __func__);
2370 dev_err(host->dev, "DMA initialization not found.\n");
2378 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the @reset bit(s) in CTRL and poll until the hardware clears
 * them, with a 500 ms timeout.  Returns true on success, false after
 * logging a timeout (returns are in lines elided from this extract).
 */
2383 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2385 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2388 ctrl = mci_readl(host, CTRL);
2390 mci_writel(host, CTRL, ctrl);
2392 /* wait till resets clear */
2394 ctrl = mci_readl(host, CTRL);
2395 if (!(ctrl & reset))
2397 } while (time_before(jiffies, timeout));
2400 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Full error-recovery reset: CIU + FIFO (+ DMA interface when DMA is
 * in use), waiting for any outstanding dma_req to drain, re-resetting
 * the FIFO afterwards, optionally reprogramming the IDMAC, and
 * finishing with a clock-update command so the CIU re-latches its
 * clock registers.  Returns true on success (returns are in lines
 * elided from this extract).
 */
2406 static bool dw_mci_reset(struct dw_mci *host)
2408 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2412 * Reseting generates a block interrupt, hence setting
2413 * the scatter-gather pointer to NULL.
2416 sg_miter_stop(&host->sg_miter);
2421 flags |= SDMMC_CTRL_DMA_RESET;
2423 if (dw_mci_ctrl_reset(host, flags)) {
2425 * In all cases we clear the RAWINTS register to clear any
2428 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2430 /* if using dma we wait for dma_req to clear */
2431 if (host->use_dma) {
2432 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2435 status = mci_readl(host, STATUS);
2436 if (!(status & SDMMC_STATUS_DMA_REQ))
2439 } while (time_before(jiffies, timeout));
2441 if (status & SDMMC_STATUS_DMA_REQ) {
2443 "%s: Timeout waiting for dma_req to "
2444 "clear during reset\n", __func__);
2448 /* when using DMA next we reset the fifo again */
2449 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2453 /* if the controller reset bit did clear, then set clock regs */
2454 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2455 dev_err(host->dev, "%s: fifo/dma reset bits didn't "
2456 "clear but ciu was reset, doing clock update\n",
2462 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2463 /* It is also recommended that we reset and reprogram idmac */
2464 dw_mci_idmac_reset(host);
2470 /* After a CTRL reset we need to have CIU set clock registers */
2471 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
/* Table mapping controller-level DT properties to DW_MCI_QUIRK_* flags. */
2477 static struct dw_mci_of_quirks {
2482 .quirk = "broken-cd",
2483 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2485 .quirk = "disable-wp",
2486 .id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
/*
 * Build a dw_mci_board from the device-tree node: slot count (default
 * 1), quirk properties, FIFO depth, card-detect debounce, bus clock
 * frequency, variant-specific parse_dt hook and high-speed caps.
 * Returns the devm-allocated pdata or an ERR_PTR (success return is in
 * a line elided from this extract).
 */
2490 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2492 struct dw_mci_board *pdata;
2493 struct device *dev = host->dev;
2494 struct device_node *np = dev->of_node;
2495 const struct dw_mci_drv_data *drv_data = host->drv_data;
2497 u32 clock_frequency;
2499 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2501 dev_err(dev, "could not allocate memory for pdata\n");
2502 return ERR_PTR(-ENOMEM);
2505 /* find out number of slots supported */
2506 if (of_property_read_u32(dev->of_node, "num-slots",
2507 &pdata->num_slots)) {
2508 dev_info(dev, "num-slots property not found, "
2509 "assuming 1 slot is available\n");
2510 pdata->num_slots = 1;
2514 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2515 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2516 pdata->quirks |= of_quirks[idx].id;
2518 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2519 dev_info(dev, "fifo-depth property not found, using "
2520 "value of FIFOTH register as default\n");
2522 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2524 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2525 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific variant a chance to parse its own properties. */
2527 if (drv_data && drv_data->parse_dt) {
2528 ret = drv_data->parse_dt(host);
2530 return ERR_PTR(ret);
2533 if (of_find_property(np, "supports-highspeed", NULL))
2534 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2539 #else /* CONFIG_OF */
/* !CONFIG_OF stub: DT pdata is unavailable, force platform data use. */
2540 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2542 return ERR_PTR(-EINVAL);
2544 #endif /* CONFIG_OF */
/*
 * Common probe entry point shared by the platform/PCI glue drivers.
 * Order: pdata (from DT if absent) -> biu/ciu clocks and bus rate ->
 * variant init/setup_clock hooks -> controller reset -> DMA setup ->
 * interrupt/timeout/FIFO programming -> version-dependent DATA offset
 * -> tasklet, card-detect workqueue and IRQ -> per-slot init (at least
 * one slot must succeed).  Error-unwind labels are in lines elided
 * from this extract.  Returns 0 on success or a negative errno.
 */
2546 int dw_mci_probe(struct dw_mci *host)
2548 const struct dw_mci_drv_data *drv_data = host->drv_data;
2549 int width, i, ret = 0;
2554 host->pdata = dw_mci_parse_dt(host);
2555 if (IS_ERR(host->pdata)) {
2556 dev_err(host->dev, "platform data not available\n");
2561 if (host->pdata->num_slots > 1) {
2563 "Platform data must supply num_slots.\n");
/* Bus-interface-unit clock is optional; enable it when present. */
2567 host->biu_clk = devm_clk_get(host->dev, "biu");
2568 if (IS_ERR(host->biu_clk)) {
2569 dev_dbg(host->dev, "biu clock not available\n");
2571 ret = clk_prepare_enable(host->biu_clk);
2573 dev_err(host->dev, "failed to enable biu clock\n");
/* Card-interface-unit clock determines bus_hz when available. */
2578 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2579 if (IS_ERR(host->ciu_clk)) {
2580 dev_dbg(host->dev, "ciu clock not available\n");
2581 host->bus_hz = host->pdata->bus_hz;
2583 ret = clk_prepare_enable(host->ciu_clk);
2585 dev_err(host->dev, "failed to enable ciu clock\n");
2589 if (host->pdata->bus_hz) {
2590 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2593 "Unable to set bus rate to %uHz\n",
2594 host->pdata->bus_hz);
2596 host->bus_hz = clk_get_rate(host->ciu_clk);
2599 if (!host->bus_hz) {
2601 "Platform data must supply bus speed\n");
2606 if (drv_data && drv_data->init) {
2607 ret = drv_data->init(host);
2610 "implementation specific init failed\n");
2615 if (drv_data && drv_data->setup_clock) {
2616 ret = drv_data->setup_clock(host);
2619 "implementation specific clock setup failed\n");
2624 host->quirks = host->pdata->quirks;
2626 spin_lock_init(&host->lock);
2627 INIT_LIST_HEAD(&host->queue);
2630 * Get the host data width - this assumes that HCON has been set with
2631 * the correct values.
2633 i = (mci_readl(host, HCON) >> 7) & 0x7;
2635 host->push_data = dw_mci_push_data16;
2636 host->pull_data = dw_mci_pull_data16;
2638 host->data_shift = 1;
2639 } else if (i == 2) {
2640 host->push_data = dw_mci_push_data64;
2641 host->pull_data = dw_mci_pull_data64;
2643 host->data_shift = 3;
2645 /* Check for a reserved value, and warn if it is */
2647 "HCON reports a reserved host data width!\n"
2648 "Defaulting to 32-bit access.\n");
2649 host->push_data = dw_mci_push_data32;
2650 host->pull_data = dw_mci_pull_data32;
2652 host->data_shift = 2;
2655 /* Reset all blocks */
2656 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2659 host->dma_ops = host->pdata->dma_ops;
2660 dw_mci_init_dma(host);
2662 /* Clear the interrupts for the host controller */
2663 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2664 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2666 /* Put in max timeout */
2667 mci_writel(host, TMOUT, 0xFFFFFFFF);
2670 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2671 * Tx Mark = fifo_size / 2 DMA Size = 8
2673 if (!host->pdata->fifo_depth) {
2675 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2676 * have been overwritten by the bootloader, just like we're
2677 * about to do, so if you know the value for your hardware, you
2678 * should put it in the platform data.
2680 fifo_size = mci_readl(host, FIFOTH);
2681 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2683 fifo_size = host->pdata->fifo_depth;
2685 host->fifo_depth = fifo_size;
2687 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2688 mci_writel(host, FIFOTH, host->fifoth_val);
2690 /* disable clock to CIU */
2691 mci_writel(host, CLKENA, 0);
2692 mci_writel(host, CLKSRC, 0);
2695 * In 2.40a spec, Data offset is changed.
2696 * Need to check the version-id and set data-offset for DATA register.
2698 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2699 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2701 if (host->verid < DW_MMC_240A)
2702 host->data_offset = DATA_OFFSET;
2704 host->data_offset = DATA_240A_OFFSET;
2706 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2707 host->card_workqueue = alloc_workqueue("dw-mci-card",
2709 if (!host->card_workqueue) {
2713 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2714 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2715 host->irq_flags, "dw-mci", host);
/* Slot count: explicit pdata value, else read it out of HCON. */
2719 if (host->pdata->num_slots)
2720 host->num_slots = host->pdata->num_slots;
2722 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2725 * Enable interrupts for command done, data over, data empty, card det,
2726 * receive ready and error such as transmit, receive timeout, crc error
2728 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2729 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2730 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2731 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2732 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2734 dev_info(host->dev, "DW MMC controller at irq %d, "
2735 "%d bit host data width, "
2737 host->irq, width, fifo_size);
2739 /* We need at least one slot to succeed */
2740 for (i = 0; i < host->num_slots; i++) {
2741 ret = dw_mci_init_slot(host, i);
2743 dev_dbg(host->dev, "slot %d init failed\n", i);
2749 dev_info(host->dev, "%d slots initialized\n", init_slots);
2751 dev_dbg(host->dev, "attempted to initialize %d slots, "
2752 "but failed on all\n", host->num_slots);
2756 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2757 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: workqueue -> DMA backend -> ciu clock -> biu clock. */
2762 destroy_workqueue(host->card_workqueue);
2765 if (host->use_dma && host->dma_ops->exit)
2766 host->dma_ops->exit(host);
2769 if (!IS_ERR(host->ciu_clk))
2770 clk_disable_unprepare(host->ciu_clk);
2773 if (!IS_ERR(host->biu_clk))
2774 clk_disable_unprepare(host->biu_clk);
2778 EXPORT_SYMBOL(dw_mci_probe);
2780 void dw_mci_remove(struct dw_mci *host)
2784 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2785 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2787 for (i = 0; i < host->num_slots; i++) {
2788 dev_dbg(host->dev, "remove slot %d\n", i);
2790 dw_mci_cleanup_slot(host->slot[i], i);
2793 /* disable clock to CIU */
2794 mci_writel(host, CLKENA, 0);
2795 mci_writel(host, CLKSRC, 0);
2797 destroy_workqueue(host->card_workqueue);
2799 if (host->use_dma && host->dma_ops->exit)
2800 host->dma_ops->exit(host);
2802 if (!IS_ERR(host->ciu_clk))
2803 clk_disable_unprepare(host->ciu_clk);
2805 if (!IS_ERR(host->biu_clk))
2806 clk_disable_unprepare(host->biu_clk);
2808 EXPORT_SYMBOL(dw_mci_remove);
2812 #ifdef CONFIG_PM_SLEEP
2814 * TODO: we should probably disable the clock to the card in the suspend path.
/**
 * dw_mci_suspend - host controller system-sleep suspend callback.
 * @host: controller state.
 *
 * Nothing to quiesce at the host level today; per-slot state is handled
 * by the MMC core, and controller context is fully rebuilt in
 * dw_mci_resume().
 *
 * Return: always 0.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
2822 int dw_mci_resume(struct dw_mci *host)
2826 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2831 if (host->use_dma && host->dma_ops->init)
2832 host->dma_ops->init(host);
2835 * Restore the initial value at FIFOTH register
2836 * And Invalidate the prev_blksz with zero
2838 mci_writel(host, FIFOTH, host->fifoth_val);
2839 host->prev_blksz = 0;
2841 /* Put in max timeout */
2842 mci_writel(host, TMOUT, 0xFFFFFFFF);
2844 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2845 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2846 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2847 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2848 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2850 for (i = 0; i < host->num_slots; i++) {
2851 struct dw_mci_slot *slot = host->slot[i];
2854 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2855 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2856 dw_mci_setup_bus(slot, true);
2861 EXPORT_SYMBOL(dw_mci_resume);
2862 #endif /* CONFIG_PM_SLEEP */
2864 static int __init dw_mci_init(void)
2866 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2870 static void __exit dw_mci_exit(void)
2874 module_init(dw_mci_init);
2875 module_exit(dw_mci_exit);
2877 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2878 MODULE_AUTHOR("NXP Semiconductor VietNam");
2879 MODULE_AUTHOR("Imagination Technologies Ltd");
2880 MODULE_LICENSE("GPL v2");