2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
43 /* Common flag combinations */
44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45 SDMMC_INT_HTO | SDMMC_INT_SBE | SDMMC_INT_EBE)
47 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR)
49 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51 #define DW_MCI_SEND_STATUS 1
52 #define DW_MCI_RECV_STATUS 2
53 #define DW_MCI_DMA_THRESHOLD 16
55 #define DW_MCI_FREQ_MAX 200000000 /* unit: Hz */
56 #define DW_MCI_FREQ_MIN 400000 /* unit: Hz */
58 #ifdef CONFIG_MMC_DW_IDMAC
59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI)
65 u32 des0; /* Control Descriptor */
66 #define IDMAC_DES0_DIC BIT(1)
67 #define IDMAC_DES0_LD BIT(2)
68 #define IDMAC_DES0_FD BIT(3)
69 #define IDMAC_DES0_CH BIT(4)
70 #define IDMAC_DES0_ER BIT(5)
71 #define IDMAC_DES0_CES BIT(30)
72 #define IDMAC_DES0_OWN BIT(31)
74 u32 des1; /* Buffer sizes */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
78 u32 des2; /* buffer 1 physical address */
80 u32 des3; /* buffer 2 physical address */
82 #endif /* CONFIG_MMC_DW_IDMAC */
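/*
 * The des0..des3 fields above make up the 16-byte struct idmac_desc
 * used in chained mode (IDMAC_DES0_CH): des0 carries control/status
 * flags, des1 the buffer size, des2 the buffer address, and des3 the
 * DMA address of the next descriptor.  Illustrative example:
 * IDMAC_SET_BUFFER1_SIZE(desc, 4096) stores 4096 in the low 13 bits
 * of des1, so a single descriptor covers at most 0x1fff (8191) bytes.
 */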
84 static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
95 static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
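/*
 * The two arrays above are the standard tuning block patterns the
 * card is expected to return for the tuning commands (CMD19 for SD,
 * CMD21 for eMMC HS200); dw_mci_execute_tuning() below hands them to
 * the platform driver as the reference data to compare each sampled
 * read against when hunting for a working sample phase.
 */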
114 static bool dw_mci_reset(struct dw_mci *host);
116 #if defined(CONFIG_DEBUG_FS)
117 static int dw_mci_req_show(struct seq_file *s, void *v)
119 struct dw_mci_slot *slot = s->private;
120 struct mmc_request *mrq;
121 struct mmc_command *cmd;
122 struct mmc_command *stop;
123 struct mmc_data *data;
125 /* Make sure we get a consistent snapshot */
126 spin_lock_bh(&slot->host->lock);
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 cmd->opcode, cmd->arg, cmd->flags,
138 cmd->resp[0], cmd->resp[1], cmd->resp[2],
139 cmd->resp[3], cmd->error);
141 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142 data->bytes_xfered, data->blocks,
143 data->blksz, data->flags, data->error);
146 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147 stop->opcode, stop->arg, stop->flags,
148 stop->resp[0], stop->resp[1], stop->resp[2],
149 stop->resp[3], stop->error);
152 spin_unlock_bh(&slot->host->lock);
157 static int dw_mci_req_open(struct inode *inode, struct file *file)
159 return single_open(file, dw_mci_req_show, inode->i_private);
162 static const struct file_operations dw_mci_req_fops = {
163 .owner = THIS_MODULE,
164 .open = dw_mci_req_open,
167 .release = single_release,
170 static int dw_mci_regs_show(struct seq_file *s, void *v)
172 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
182 static int dw_mci_regs_open(struct inode *inode, struct file *file)
184 return single_open(file, dw_mci_regs_show, inode->i_private);
187 static const struct file_operations dw_mci_regs_fops = {
188 .owner = THIS_MODULE,
189 .open = dw_mci_regs_open,
192 .release = single_release,
195 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197 struct mmc_host *mmc = slot->mmc;
198 struct dw_mci *host = slot->host;
202 root = mmc->debugfs_root;
206 node = debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
211 node = debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
216 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
220 node = debugfs_create_x32("pending_events", S_IRUSR, root,
221 (u32 *)&host->pending_events);
225 node = debugfs_create_x32("completed_events", S_IRUSR, root,
226 (u32 *)&host->completed_events);
233 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235 #endif /* defined(CONFIG_DEBUG_FS) */
237 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
239 struct mmc_data *data;
240 struct dw_mci_slot *slot = mmc_priv(mmc);
241 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
243 cmd->error = -EINPROGRESS;
247 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
248 cmd->opcode == MMC_GO_IDLE_STATE ||
249 cmd->opcode == MMC_GO_INACTIVE_STATE ||
250 (cmd->opcode == SD_IO_RW_DIRECT &&
251 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
252 cmdr |= SDMMC_CMD_STOP;
253 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
254 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
256 if (cmd->flags & MMC_RSP_PRESENT) {
257 /* We expect a response, so set this bit */
258 cmdr |= SDMMC_CMD_RESP_EXP;
259 if (cmd->flags & MMC_RSP_136)
260 cmdr |= SDMMC_CMD_RESP_LONG;
263 if (cmd->flags & MMC_RSP_CRC)
264 cmdr |= SDMMC_CMD_RESP_CRC;
268 cmdr |= SDMMC_CMD_DAT_EXP;
269 if (data->flags & MMC_DATA_STREAM)
270 cmdr |= SDMMC_CMD_STRM_MODE;
271 if (data->flags & MMC_DATA_WRITE)
272 cmdr |= SDMMC_CMD_DAT_WR;
275 if (drv_data && drv_data->prepare_command)
276 drv_data->prepare_command(slot->host, &cmdr);
281 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
283 struct mmc_command *stop;
289 stop = &host->stop_abort;
291 memset(stop, 0, sizeof(struct mmc_command));
293 if (cmdr == MMC_READ_SINGLE_BLOCK ||
294 cmdr == MMC_READ_MULTIPLE_BLOCK ||
295 cmdr == MMC_WRITE_BLOCK ||
296 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
297 stop->opcode = MMC_STOP_TRANSMISSION;
299 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
300 } else if (cmdr == SD_IO_RW_EXTENDED) {
301 stop->opcode = SD_IO_RW_DIRECT;
302 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
303 ((cmd->arg >> 28) & 0x7);
304 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
309 cmdr = stop->opcode | SDMMC_CMD_STOP |
310 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
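/*
 * Illustrative examples of the pre-built stop above: for a block
 * transfer such as MMC_READ_MULTIPLE_BLOCK (CMD18) the prepared stop
 * is MMC_STOP_TRANSMISSION (CMD12) with an R1b response, while for
 * SD_IO_RW_EXTENDED (CMD53) it is an SD_IO_RW_DIRECT (CMD52) write to
 * the CCCR ABORT register of the same function.  The returned cmdr
 * has SDMMC_CMD_STOP set, so the controller issues it as an abort.
 */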
315 static void dw_mci_start_command(struct dw_mci *host,
316 struct mmc_command *cmd, u32 cmd_flags)
320 "start command: ARGR=0x%08x CMDR=0x%08x\n",
321 cmd->arg, cmd_flags);
323 mci_writel(host, CMDARG, cmd->arg);
326 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
329 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
331 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
332 dw_mci_start_command(host, stop, host->stop_cmdr);
335 /* DMA interface functions */
336 static void dw_mci_stop_dma(struct dw_mci *host)
338 if (host->using_dma) {
339 host->dma_ops->stop(host);
340 host->dma_ops->cleanup(host);
343 /* Data transfer was stopped by the interrupt handler */
344 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
347 static int dw_mci_get_dma_dir(struct mmc_data *data)
349 if (data->flags & MMC_DATA_WRITE)
350 return DMA_TO_DEVICE;
352 return DMA_FROM_DEVICE;
355 #ifdef CONFIG_MMC_DW_IDMAC
356 static void dw_mci_dma_cleanup(struct dw_mci *host)
358 struct mmc_data *data = host->data;
361 if (!data->host_cookie)
362 dma_unmap_sg(host->dev, data->sg, data->sg_len,
365 dw_mci_get_dma_dir(data));
368 static void dw_mci_idmac_reset(struct dw_mci *host)
370 u32 bmod = mci_readl(host, BMOD);
371 /* Software reset of DMA */
372 bmod |= SDMMC_IDMAC_SWRESET;
373 mci_writel(host, BMOD, bmod);
376 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
380 /* Disable and reset the IDMAC interface */
381 temp = mci_readl(host, CTRL);
382 temp &= ~SDMMC_CTRL_USE_IDMAC;
383 temp |= SDMMC_CTRL_DMA_RESET;
384 mci_writel(host, CTRL, temp);
386 /* Stop the IDMAC running */
387 temp = mci_readl(host, BMOD);
388 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
389 temp |= SDMMC_IDMAC_SWRESET;
390 mci_writel(host, BMOD, temp);
393 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
395 struct mmc_data *data = host->data;
397 dev_vdbg(host->dev, "DMA complete\n");
399 host->dma_ops->cleanup(host);
402 * If the card was removed, data will be NULL. No point in trying to
403 * send the stop command or waiting for NBUSY in this case.
406 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
407 tasklet_schedule(&host->tasklet);
411 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
415 struct idmac_desc *desc = host->sg_cpu;
417 for (i = 0; i < sg_len; i++, desc++) {
418 unsigned int length = sg_dma_len(&data->sg[i]);
419 u32 mem_addr = sg_dma_address(&data->sg[i]);
421 /* Set the OWN bit and disable interrupts for this descriptor */
422 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
425 IDMAC_SET_BUFFER1_SIZE(desc, length);
427 /* Physical address to DMA to/from */
428 desc->des2 = mem_addr;
431 /* Set first descriptor */
433 desc->des0 |= IDMAC_DES0_FD;
435 /* Set last descriptor */
436 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
437 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
438 desc->des0 |= IDMAC_DES0_LD;
443 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
447 dw_mci_translate_sglist(host, host->data, sg_len);
449 /* Select IDMAC interface */
450 temp = mci_readl(host, CTRL);
451 temp |= SDMMC_CTRL_USE_IDMAC;
452 mci_writel(host, CTRL, temp);
456 /* Enable the IDMAC */
457 temp = mci_readl(host, BMOD);
458 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
459 mci_writel(host, BMOD, temp);
461 /* Start it running */
462 mci_writel(host, PLDMND, 1);
465 static int dw_mci_idmac_init(struct dw_mci *host)
467 struct idmac_desc *p;
470 /* Number of descriptors in the ring buffer */
471 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
473 /* Forward link the descriptor list */
474 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
475 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
477 /* Set the last descriptor as the end-of-ring descriptor */
478 p->des3 = host->sg_dma;
479 p->des0 = IDMAC_DES0_ER;
481 dw_mci_idmac_reset(host);
483 /* Mask out interrupts - get Tx & Rx complete only */
484 mci_writel(host, IDSTS, IDMAC_INT_CLR);
485 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
488 /* Set the descriptor base address */
489 mci_writel(host, DBADDR, host->sg_dma);
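/*
 * Worked example of the descriptor ring set up above (assuming a
 * 4 KiB PAGE_SIZE): sizeof(struct idmac_desc) is 16 bytes, so
 * ring_size = 4096 / 16 = 256.  Descriptor i's des3 holds the DMA
 * address of descriptor i + 1, and the last descriptor's des3 wraps
 * back to sg_dma with IDMAC_DES0_ER set to mark the end of the ring.
 */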
493 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
494 .init = dw_mci_idmac_init,
495 .start = dw_mci_idmac_start_dma,
496 .stop = dw_mci_idmac_stop_dma,
497 .complete = dw_mci_idmac_complete_dma,
498 .cleanup = dw_mci_dma_cleanup,
500 #endif /* CONFIG_MMC_DW_IDMAC */
502 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
503 struct mmc_data *data,
506 struct scatterlist *sg;
507 unsigned int i, sg_len;
509 if (!next && data->host_cookie)
510 return data->host_cookie;
513 * We don't do DMA on "complex" transfers, i.e. with
514 * non-word-aligned buffers or lengths. Also, we don't bother
515 * with all the DMA setup overhead for short transfers.
517 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
523 for_each_sg(data->sg, sg, data->sg_len, i) {
524 if (sg->offset & 3 || sg->length & 3)
528 sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
531 dw_mci_get_dma_dir(data));
536 data->host_cookie = sg_len;
541 static void dw_mci_pre_req(struct mmc_host *mmc,
542 struct mmc_request *mrq,
545 struct dw_mci_slot *slot = mmc_priv(mmc);
546 struct mmc_data *data = mrq->data;
548 if (!slot->host->use_dma || !data)
551 if (data->host_cookie) {
552 data->host_cookie = 0;
556 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
557 data->host_cookie = 0;
560 static void dw_mci_post_req(struct mmc_host *mmc,
561 struct mmc_request *mrq,
564 struct dw_mci_slot *slot = mmc_priv(mmc);
565 struct mmc_data *data = mrq->data;
567 if (!slot->host->use_dma || !data)
570 if (data->host_cookie)
571 dma_unmap_sg(slot->host->dev, data->sg, data->sg_len,
574 dw_mci_get_dma_dir(data));
575 data->host_cookie = 0;
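/*
 * The host_cookie handling above lets the mmc block layer overlap
 * DMA mapping with an in-flight transfer: the core calls .pre_req()
 * on the *next* request while the current one still runs, so
 * dw_mci_pre_dma_transfer() is entered with next == 1 and caches the
 * mapped sg count in data->host_cookie.  When that request actually
 * starts, the same function is called with next == 0 and returns the
 * cached cookie instead of mapping again; .post_req() unmaps and
 * clears the cookie once the transfer is done.
 */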
578 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
580 #ifdef CONFIG_MMC_DW_IDMAC
581 unsigned int blksz = data->blksz;
582 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
583 u32 fifo_width = 1 << host->data_shift;
584 u32 blksz_depth = blksz / fifo_width, fifoth_val;
585 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
586 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
588 tx_wmark = (host->fifo_depth) / 2;
589 tx_wmark_invers = host->fifo_depth - tx_wmark;
593 * MSIZE is '1' if blksz is not a multiple of the FIFO width
595 if (blksz % fifo_width) {
602 if (!((blksz_depth % mszs[idx]) ||
603 (tx_wmark_invers % mszs[idx]))) {
605 rx_wmark = mszs[idx] - 1;
610 * If idx is '0', it won't be tried;
611 * thus, the initial values are used
614 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
615 mci_writel(host, FIFOTH, fifoth_val);
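/*
 * Illustrative example of the watermark search above: with a 32-bit
 * FIFO (fifo_width = 4), fifo_depth = 64 and blksz = 512, we get
 * blksz_depth = 128, tx_wmark = 32 and tx_wmark_invers = 32.
 * Scanning mszs[] downward from 256, 32 is the first burst size that
 * divides both 128 and 32, so the loop settles on msize = 5 (a
 * 32-transfer burst) and rx_wmark = 31.
 */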
619 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
621 unsigned int blksz = data->blksz;
622 u32 blksz_depth, fifo_depth;
625 WARN_ON(!(data->flags & MMC_DATA_READ));
627 if (host->timing != MMC_TIMING_MMC_HS200 &&
628 host->timing != MMC_TIMING_UHS_SDR104)
631 blksz_depth = blksz / (1 << host->data_shift);
632 fifo_depth = host->fifo_depth;
634 if (blksz_depth > fifo_depth)
638 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
639 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
640 * Currently just choose blksz.
643 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
647 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
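/*
 * Illustrative example: with a 64-bit FIFO (data_shift = 3) of depth
 * 128 and blksz = 512, blksz_depth = 64 fits in the FIFO, so under
 * HS200 or SDR104 timing the card-read threshold is enabled with
 * thld_size = blksz = 512; any other timing, or a block deeper than
 * the FIFO, takes the SDMMC_SET_RD_THLD(0, 0) disable path above.
 */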
650 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
657 /* If we don't have a channel, we can't do DMA */
661 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
663 host->dma_ops->stop(host);
670 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
671 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
675 * Decide the MSIZE and RX/TX Watermark.
676 * If the current block size is the same as the previous one,
677 * there is no need to update FIFOTH.
679 if (host->prev_blksz != data->blksz)
680 dw_mci_adjust_fifoth(host, data);
682 /* Enable the DMA interface */
683 temp = mci_readl(host, CTRL);
684 temp |= SDMMC_CTRL_DMA_ENABLE;
685 mci_writel(host, CTRL, temp);
687 /* Disable RX/TX IRQs, let DMA handle it */
688 temp = mci_readl(host, INTMASK);
689 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
690 mci_writel(host, INTMASK, temp);
692 host->dma_ops->start(host, sg_len);
697 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
701 data->error = -EINPROGRESS;
707 if (data->flags & MMC_DATA_READ) {
708 host->dir_status = DW_MCI_RECV_STATUS;
709 dw_mci_ctrl_rd_thld(host, data);
711 host->dir_status = DW_MCI_SEND_STATUS;
714 if (dw_mci_submit_data_dma(host, data)) {
715 int flags = SG_MITER_ATOMIC;
716 if (host->data->flags & MMC_DATA_READ)
717 flags |= SG_MITER_TO_SG;
719 flags |= SG_MITER_FROM_SG;
721 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
723 host->part_buf_start = 0;
724 host->part_buf_count = 0;
726 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
727 temp = mci_readl(host, INTMASK);
728 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
729 mci_writel(host, INTMASK, temp);
731 temp = mci_readl(host, CTRL);
732 temp &= ~SDMMC_CTRL_DMA_ENABLE;
733 mci_writel(host, CTRL, temp);
736 * Use the initial fifoth_val for PIO mode.
737 * If the next issued data may be transferred by DMA mode,
738 * prev_blksz should be invalidated.
740 mci_writel(host, FIFOTH, host->fifoth_val);
741 host->prev_blksz = 0;
744 * Keep the current block size.
745 * It will be used to decide whether to update
746 * the FIFOTH register next time.
748 host->prev_blksz = data->blksz;
752 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
754 struct dw_mci *host = slot->host;
755 unsigned long timeout = jiffies + msecs_to_jiffies(500);
756 unsigned int cmd_status = 0;
758 mci_writel(host, CMDARG, arg);
760 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
762 while (time_before(jiffies, timeout)) {
763 cmd_status = mci_readl(host, CMD);
764 if (!(cmd_status & SDMMC_CMD_START))
767 dev_err(&slot->mmc->class_dev,
768 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
769 cmd, arg, cmd_status);
772 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
774 struct dw_mci *host = slot->host;
775 unsigned int clock = slot->clock;
780 mci_writel(host, CLKENA, 0);
782 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
783 } else if (clock != host->current_speed || force_clkinit) {
784 div = host->bus_hz / clock;
785 if (host->bus_hz % clock && host->bus_hz > clock)
787 * move the + 1 after the divide to prevent
788 * over-clocking the card.
792 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
794 if ((clock << div) != slot->__clk_old || force_clkinit)
795 dev_info(&slot->mmc->class_dev,
796 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
797 slot->id, host->bus_hz, clock,
798 div ? ((host->bus_hz / div) >> 1) :
802 mci_writel(host, CLKENA, 0);
803 mci_writel(host, CLKSRC, 0);
807 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
809 /* set clock to desired speed */
810 mci_writel(host, CLKDIV, div);
814 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816 /* enable clock; only low power if no SDIO */
817 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
818 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
819 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
820 mci_writel(host, CLKENA, clk_en_a);
824 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
826 /* keep the clock value, reflecting the clock divisor */
827 slot->__clk_old = clock << div;
830 host->current_speed = clock;
832 /* Set the current slot bus width */
833 mci_writel(host, CTYPE, (slot->ctype << slot->id));
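/*
 * Illustrative divider example for the computation above: with
 * bus_hz = 100 MHz and a requested clock of 400 kHz, div is first
 * 100000000 / 400000 = 250, and then DIV_ROUND_UP(250, 2) = 125 is
 * written to CLKDIV, for an actual card clock of
 * bus_hz / (2 * 125) = 400 kHz.  A request equal to bus_hz yields
 * div = 0, which the divider treats as bypass (no division).
 */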
836 static void __dw_mci_start_request(struct dw_mci *host,
837 struct dw_mci_slot *slot,
838 struct mmc_command *cmd)
840 struct mmc_request *mrq;
841 struct mmc_data *data;
846 host->cur_slot = slot;
849 host->pending_events = 0;
850 host->completed_events = 0;
851 host->cmd_status = 0;
852 host->data_status = 0;
853 host->dir_status = 0;
857 mci_writel(host, TMOUT, 0xFFFFFFFF);
858 mci_writel(host, BYTCNT, data->blksz*data->blocks);
859 mci_writel(host, BLKSIZ, data->blksz);
862 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
864 /* this is the first command, send the initialization clock */
865 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
866 cmdflags |= SDMMC_CMD_INIT;
869 dw_mci_submit_data(host, data);
873 dw_mci_start_command(host, cmd, cmdflags);
876 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
878 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
881 static void dw_mci_start_request(struct dw_mci *host,
882 struct dw_mci_slot *slot)
884 struct mmc_request *mrq = slot->mrq;
885 struct mmc_command *cmd;
887 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
888 __dw_mci_start_request(host, slot, cmd);
891 /* must be called with host->lock held */
892 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
893 struct mmc_request *mrq)
895 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
900 if (host->state == STATE_IDLE) {
901 host->state = STATE_SENDING_CMD;
902 dw_mci_start_request(host, slot);
904 list_add_tail(&slot->queue_node, &host->queue);
908 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 struct dw_mci_slot *slot = mmc_priv(mmc);
911 struct dw_mci *host = slot->host;
916 * The check for card presence and queueing of the request must be
917 * atomic, otherwise the card could be removed in between and the
918 * request wouldn't fail until another card was inserted.
920 spin_lock_bh(&host->lock);
922 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
923 spin_unlock_bh(&host->lock);
924 mrq->cmd->error = -ENOMEDIUM;
925 mmc_request_done(mmc, mrq);
929 dw_mci_queue_request(host, slot, mrq);
931 spin_unlock_bh(&host->lock);
934 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
936 struct dw_mci_slot *slot = mmc_priv(mmc);
937 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
940 switch (ios->bus_width) {
941 case MMC_BUS_WIDTH_4:
942 slot->ctype = SDMMC_CTYPE_4BIT;
944 case MMC_BUS_WIDTH_8:
945 slot->ctype = SDMMC_CTYPE_8BIT;
948 /* set default 1 bit mode */
949 slot->ctype = SDMMC_CTYPE_1BIT;
952 regs = mci_readl(slot->host, UHS_REG);
955 if (ios->timing == MMC_TIMING_MMC_DDR52)
956 regs |= ((0x1 << slot->id) << 16);
958 regs &= ~((0x1 << slot->id) << 16);
960 mci_writel(slot->host, UHS_REG, regs);
961 slot->host->timing = ios->timing;
964 * Use mirror of ios->clock to prevent race with mmc
965 * core ios update when finding the minimum.
967 slot->clock = ios->clock;
969 if (drv_data && drv_data->set_ios)
970 drv_data->set_ios(slot->host, ios);
972 /* Slot specific timing and width adjustment */
973 dw_mci_setup_bus(slot, false);
975 switch (ios->power_mode) {
977 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
978 regs = mci_readl(slot->host, PWREN);
979 regs |= (1 << slot->id);
980 mci_writel(slot->host, PWREN, regs);
983 regs = mci_readl(slot->host, PWREN);
984 regs &= ~(1 << slot->id);
985 mci_writel(slot->host, PWREN, regs);
992 static int dw_mci_get_ro(struct mmc_host *mmc)
995 struct dw_mci_slot *slot = mmc_priv(mmc);
996 int gpio_ro = mmc_gpio_get_ro(mmc);
998 /* Use platform get_ro function, else try on-board write protect */
999 if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1000 (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1002 else if (!IS_ERR_VALUE(gpio_ro))
1003 read_only = gpio_ro;
1006 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1008 dev_dbg(&mmc->class_dev, "card is %s\n",
1009 read_only ? "read-only" : "read-write");
1014 static int dw_mci_get_cd(struct mmc_host *mmc)
1017 struct dw_mci_slot *slot = mmc_priv(mmc);
1018 struct dw_mci_board *brd = slot->host->pdata;
1019 struct dw_mci *host = slot->host;
1020 int gpio_cd = mmc_gpio_get_cd(mmc);
1022 /* Use platform get_cd function, else try on-board card detect */
1023 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1025 else if (!IS_ERR_VALUE(gpio_cd))
1028 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1031 spin_lock_bh(&host->lock);
1033 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1034 dev_dbg(&mmc->class_dev, "card is present\n");
1036 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1037 dev_dbg(&mmc->class_dev, "card is not present\n");
1039 spin_unlock_bh(&host->lock);
1045 * Disable low power mode.
1047 * Low power mode will stop the card clock when idle. According to the
1048 * description of the CLKENA register we should disable low power mode
1049 * for SDIO cards if we need SDIO interrupts to work.
1051 * This function is fast if low power mode is already disabled.
1053 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1055 struct dw_mci *host = slot->host;
1057 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1059 clk_en_a = mci_readl(host, CLKENA);
1061 if (clk_en_a & clken_low_pwr) {
1062 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064 SDMMC_CMD_PRV_DAT_WAIT, 0);
1068 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1070 struct dw_mci_slot *slot = mmc_priv(mmc);
1071 struct dw_mci *host = slot->host;
1074 /* Enable/disable Slot Specific SDIO interrupt */
1075 int_mask = mci_readl(host, INTMASK);
1078 * Turn off low power mode if it was enabled. This is a bit of
1079 * a heavy operation and we disable / enable IRQs a lot, so
1080 * we'll leave low power mode disabled and it will get
1081 * re-enabled again in dw_mci_setup_bus().
1083 dw_mci_disable_low_power(slot);
1085 mci_writel(host, INTMASK,
1086 (int_mask | SDMMC_INT_SDIO(slot->id)));
1088 mci_writel(host, INTMASK,
1089 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1093 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1095 struct dw_mci_slot *slot = mmc_priv(mmc);
1096 struct dw_mci *host = slot->host;
1097 const struct dw_mci_drv_data *drv_data = host->drv_data;
1098 struct dw_mci_tuning_data tuning_data;
1101 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1111 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1116 "Undefined command(%d) for tuning\n", opcode);
1120 if (drv_data && drv_data->execute_tuning)
1121 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1125 static const struct mmc_host_ops dw_mci_ops = {
1126 .request = dw_mci_request,
1127 .pre_req = dw_mci_pre_req,
1128 .post_req = dw_mci_post_req,
1129 .set_ios = dw_mci_set_ios,
1130 .get_ro = dw_mci_get_ro,
1131 .get_cd = dw_mci_get_cd,
1132 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1133 .execute_tuning = dw_mci_execute_tuning,
1136 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1137 __releases(&host->lock)
1138 __acquires(&host->lock)
1140 struct dw_mci_slot *slot;
1141 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1143 WARN_ON(host->cmd || host->data);
1145 host->cur_slot->mrq = NULL;
1147 if (!list_empty(&host->queue)) {
1148 slot = list_entry(host->queue.next,
1149 struct dw_mci_slot, queue_node);
1150 list_del(&slot->queue_node);
1151 dev_vdbg(host->dev, "list not empty: %s is next\n",
1152 mmc_hostname(slot->mmc));
1153 host->state = STATE_SENDING_CMD;
1154 dw_mci_start_request(host, slot);
1156 dev_vdbg(host->dev, "list empty\n");
1157 host->state = STATE_IDLE;
1160 spin_unlock(&host->lock);
1161 mmc_request_done(prev_mmc, mrq);
1162 spin_lock(&host->lock);
1165 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1167 u32 status = host->cmd_status;
1169 host->cmd_status = 0;
1171 /* Read the response from the card (up to 16 bytes) */
1172 if (cmd->flags & MMC_RSP_PRESENT) {
1173 if (cmd->flags & MMC_RSP_136) {
1174 cmd->resp[3] = mci_readl(host, RESP0);
1175 cmd->resp[2] = mci_readl(host, RESP1);
1176 cmd->resp[1] = mci_readl(host, RESP2);
1177 cmd->resp[0] = mci_readl(host, RESP3);
1179 cmd->resp[0] = mci_readl(host, RESP0);
1186 if (status & SDMMC_INT_RTO)
1187 cmd->error = -ETIMEDOUT;
1188 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189 cmd->error = -EILSEQ;
1190 else if (status & SDMMC_INT_RESP_ERR)
1196 /* newer IP versions need a delay between retries */
1197 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1204 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1206 u32 status = host->data_status;
1208 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1209 if (status & SDMMC_INT_DRTO) {
1210 data->error = -ETIMEDOUT;
1211 } else if (status & SDMMC_INT_DCRC) {
1212 data->error = -EILSEQ;
1213 } else if (status & SDMMC_INT_EBE) {
1214 if (host->dir_status ==
1215 DW_MCI_SEND_STATUS) {
1217 * No data CRC status was returned.
1218 * The number of bytes transferred
1219 * will be exaggerated in PIO mode.
1221 data->bytes_xfered = 0;
1222 data->error = -ETIMEDOUT;
1223 } else if (host->dir_status ==
1224 DW_MCI_RECV_STATUS) {
1228 /* SDMMC_INT_SBE is included */
1232 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1235 * After an error, there may be data lingering in the FIFO
1240 data->bytes_xfered = data->blocks * data->blksz;
1247 static void dw_mci_tasklet_func(unsigned long priv)
1249 struct dw_mci *host = (struct dw_mci *)priv;
1250 struct mmc_data *data;
1251 struct mmc_command *cmd;
1252 struct mmc_request *mrq;
1253 enum dw_mci_state state;
1254 enum dw_mci_state prev_state;
1257 spin_lock(&host->lock);
1259 state = host->state;
1270 case STATE_SENDING_CMD:
1271 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1272 &host->pending_events))
1277 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1278 err = dw_mci_command_complete(host, cmd);
1279 if (cmd == mrq->sbc && !err) {
1280 prev_state = state = STATE_SENDING_CMD;
1281 __dw_mci_start_request(host, host->cur_slot,
1286 if (cmd->data && err) {
1287 dw_mci_stop_dma(host);
1288 send_stop_abort(host, data);
1289 state = STATE_SENDING_STOP;
1293 if (!cmd->data || err) {
1294 dw_mci_request_end(host, mrq);
1298 prev_state = state = STATE_SENDING_DATA;
1301 case STATE_SENDING_DATA:
1303 * We could get a data error and never a transfer
1304 * complete so we'd better check for it here.
1306 * Note that we don't really care if we also got a
1307 * transfer complete; stopping the DMA and sending an abort won't hurt.
1310 if (test_and_clear_bit(EVENT_DATA_ERROR,
1311 &host->pending_events)) {
1312 dw_mci_stop_dma(host);
1313 send_stop_abort(host, data);
1314 state = STATE_DATA_ERROR;
1318 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1319 &host->pending_events))
1322 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1325 * Handle an EVENT_DATA_ERROR that might have shown up
1326 * before the transfer completed. This might not have
1327 * been caught by the check above because the interrupt
1328 * could have gone off between the previous check and
1329 * the check for transfer complete.
1331 * Technically this ought not be needed assuming we
1332 * get a DATA_COMPLETE eventually (we'll notice the
1333 * error and end the request), but it shouldn't hurt.
1335 * This has the advantage of sending the stop command.
1337 if (test_and_clear_bit(EVENT_DATA_ERROR,
1338 &host->pending_events)) {
1339 dw_mci_stop_dma(host);
1340 send_stop_abort(host, data);
1341 state = STATE_DATA_ERROR;
1344 prev_state = state = STATE_DATA_BUSY;
1348 case STATE_DATA_BUSY:
1349 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1350 &host->pending_events))
1354 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1355 err = dw_mci_data_complete(host, data);
1358 if (!data->stop || mrq->sbc) {
1359 if (mrq->sbc && data->stop)
1360 data->stop->error = 0;
1361 dw_mci_request_end(host, mrq);
1365 /* stop command for open-ended transfer */
1367 send_stop_abort(host, data);
1370 * If we don't have a command complete now we'll
1371 * never get one since we just reset everything;
1372 * better end the request.
1374 * If we do have a command complete we'll fall
1375 * through to the SENDING_STOP command and
1376 * everything will be peachy keen.
1378 if (!test_bit(EVENT_CMD_COMPLETE,
1379 &host->pending_events)) {
1381 dw_mci_request_end(host, mrq);
1387 * If err is non-zero,
1388 * a stop/abort command has already been issued.
1390 prev_state = state = STATE_SENDING_STOP;
1394 case STATE_SENDING_STOP:
1395 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1396 &host->pending_events))
1399 /* CMD error in data command */
1400 if (mrq->cmd->error && mrq->data)
1407 dw_mci_command_complete(host, mrq->stop);
1409 host->cmd_status = 0;
1411 dw_mci_request_end(host, mrq);
1414 case STATE_DATA_ERROR:
1415 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1416 &host->pending_events))
1419 state = STATE_DATA_BUSY;
1422 } while (state != prev_state);
1424 host->state = state;
1426 spin_unlock(&host->lock);
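/*
 * Summary of the request state machine driven by the tasklet above:
 * STATE_IDLE -> STATE_SENDING_CMD -> STATE_SENDING_DATA ->
 * STATE_DATA_BUSY -> STATE_SENDING_STOP -> STATE_IDLE via
 * dw_mci_request_end().  Non-data commands complete straight from
 * STATE_SENDING_CMD, and a data error detours through
 * STATE_DATA_ERROR, which waits for EVENT_XFER_COMPLETE before
 * rejoining at STATE_DATA_BUSY.
 */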
1430 /* push final bytes to part_buf, only use during push */
1431 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1433 memcpy((void *)&host->part_buf, buf, cnt);
1434 host->part_buf_count = cnt;
1437 /* append bytes to part_buf, only use during push */
1438 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1440 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1441 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1442 host->part_buf_count += cnt;
1446 /* pull first bytes from part_buf, only use during pull */
1447 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1449 cnt = min(cnt, (int)host->part_buf_count);
1451 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1453 host->part_buf_count -= cnt;
1454 host->part_buf_start += cnt;
1459 /* pull final bytes from the part_buf, assuming it's just been filled */
1460 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1462 memcpy(buf, &host->part_buf, cnt);
1463 host->part_buf_start = cnt;
1464 host->part_buf_count = (1 << host->data_shift) - cnt;
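/*
 * Illustrative part_buf example: with a 16-bit FIFO (data_shift = 1),
 * pushing 5 bytes writes two full 16-bit words and parks the odd
 * byte in part_buf via dw_mci_set_part_bytes(); the next push (or
 * the end-of-transfer check in dw_mci_push_data16()) flushes it.  On
 * the pull side, a partially consumed FIFO word is kept in part_buf
 * and dw_mci_pull_part_bytes() drains it before any new FIFO reads.
 */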
1467 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1469 struct mmc_data *data = host->data;
1472 /* try and push anything in the part_buf */
1473 if (unlikely(host->part_buf_count)) {
1474 int len = dw_mci_push_part_bytes(host, buf, cnt);
1477 if (host->part_buf_count == 2) {
1478 mci_writew(host, DATA(host->data_offset),
1480 host->part_buf_count = 0;
1483 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1484 if (unlikely((unsigned long)buf & 0x1)) {
1486 u16 aligned_buf[64];
1487 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1488 int items = len >> 1;
1490 /* memcpy from input buffer into aligned buffer */
1491 memcpy(aligned_buf, buf, len);
1494 /* push data from aligned buffer into fifo */
1495 for (i = 0; i < items; ++i)
1496 mci_writew(host, DATA(host->data_offset),
1503 for (; cnt >= 2; cnt -= 2)
1504 mci_writew(host, DATA(host->data_offset), *pdata++);
1507 /* put anything remaining in the part_buf */
1509 dw_mci_set_part_bytes(host, buf, cnt);
1510 /* Push data if we have reached the expected data length */
1511 if ((data->bytes_xfered + init_cnt) ==
1512 (data->blksz * data->blocks))
1513 mci_writew(host, DATA(host->data_offset),
1518 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1520 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1521 if (unlikely((unsigned long)buf & 0x1)) {
1523 /* pull data from fifo into aligned buffer */
1524 u16 aligned_buf[64];
1525 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1526 int items = len >> 1;
1528 for (i = 0; i < items; ++i)
1529 aligned_buf[i] = mci_readw(host,
1530 DATA(host->data_offset));
1531 /* memcpy from aligned buffer into output buffer */
1532 memcpy(buf, aligned_buf, len);
1540 for (; cnt >= 2; cnt -= 2)
1541 *pdata++ = mci_readw(host, DATA(host->data_offset));
1545 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1546 dw_mci_pull_final_bytes(host, buf, cnt);
1550 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1552 struct mmc_data *data = host->data;
1555 /* try and push anything in the part_buf */
1556 if (unlikely(host->part_buf_count)) {
1557 int len = dw_mci_push_part_bytes(host, buf, cnt);
1560 if (host->part_buf_count == 4) {
1561 mci_writel(host, DATA(host->data_offset),
1563 host->part_buf_count = 0;
1566 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1567 if (unlikely((unsigned long)buf & 0x3)) {
1569 u32 aligned_buf[32];
1570 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1571 int items = len >> 2;
1573 /* memcpy from input buffer into aligned buffer */
1574 memcpy(aligned_buf, buf, len);
1577 /* push data from aligned buffer into fifo */
1578 for (i = 0; i < items; ++i)
1579 mci_writel(host, DATA(host->data_offset),
1586 for (; cnt >= 4; cnt -= 4)
1587 mci_writel(host, DATA(host->data_offset), *pdata++);
1590 /* put anything remaining in the part_buf */
1592 dw_mci_set_part_bytes(host, buf, cnt);
1593 /* Push data if we have reached the expected data length */
1594 if ((data->bytes_xfered + init_cnt) ==
1595 (data->blksz * data->blocks))
1596 mci_writel(host, DATA(host->data_offset),
1601 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1603 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1604 if (unlikely((unsigned long)buf & 0x3)) {
1606 /* pull data from fifo into aligned buffer */
1607 u32 aligned_buf[32];
1608 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1609 int items = len >> 2;
1611 for (i = 0; i < items; ++i)
1612 aligned_buf[i] = mci_readl(host,
1613 DATA(host->data_offset));
1614 /* memcpy from aligned buffer into output buffer */
1615 memcpy(buf, aligned_buf, len);
1623 for (; cnt >= 4; cnt -= 4)
1624 *pdata++ = mci_readl(host, DATA(host->data_offset));
1628 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1629 dw_mci_pull_final_bytes(host, buf, cnt);
1633 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1635 struct mmc_data *data = host->data;
1638 /* try and push anything in the part_buf */
1639 if (unlikely(host->part_buf_count)) {
1640 int len = dw_mci_push_part_bytes(host, buf, cnt);
1644 if (host->part_buf_count == 8) {
1645 mci_writeq(host, DATA(host->data_offset),
1647 host->part_buf_count = 0;
1650 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1651 if (unlikely((unsigned long)buf & 0x7)) {
1653 u64 aligned_buf[16];
1654 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1655 int items = len >> 3;
1657 /* memcpy from input buffer into aligned buffer */
1658 memcpy(aligned_buf, buf, len);
1661 /* push data from aligned buffer into fifo */
1662 for (i = 0; i < items; ++i)
1663 mci_writeq(host, DATA(host->data_offset),
1670 for (; cnt >= 8; cnt -= 8)
1671 mci_writeq(host, DATA(host->data_offset), *pdata++);
1674 /* put anything remaining in the part_buf */
1676 dw_mci_set_part_bytes(host, buf, cnt);
1677 /* Push data if we have reached the expected data length */
1678 if ((data->bytes_xfered + init_cnt) ==
1679 (data->blksz * data->blocks))
1680 mci_writeq(host, DATA(host->data_offset),
1685 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1687 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1688 if (unlikely((unsigned long)buf & 0x7)) {
1690 /* pull data from fifo into aligned buffer */
1691 u64 aligned_buf[16];
1692 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1693 int items = len >> 3;
1695 for (i = 0; i < items; ++i)
1696 aligned_buf[i] = mci_readq(host,
1697 DATA(host->data_offset));
1698 /* memcpy from aligned buffer into output buffer */
1699 memcpy(buf, aligned_buf, len);
1707 for (; cnt >= 8; cnt -= 8)
1708 *pdata++ = mci_readq(host, DATA(host->data_offset));
1712 host->part_buf = mci_readq(host, DATA(host->data_offset));
1713 dw_mci_pull_final_bytes(host, buf, cnt);
1717 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1721 /* get remaining partial bytes */
1722 len = dw_mci_pull_part_bytes(host, buf, cnt);
1723 if (unlikely(len == cnt))
1728 /* get the rest of the data */
1729 host->pull_data(host, buf, cnt);
1732 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1734 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1736 unsigned int offset;
1737 struct mmc_data *data = host->data;
1738 int shift = host->data_shift;
1741 unsigned int remain, fcnt;
1744 if (!sg_miter_next(sg_miter))
1747 host->sg = sg_miter->piter.sg;
1748 buf = sg_miter->addr;
1749 remain = sg_miter->length;
1753 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1754 << shift) + host->part_buf_count;
1755 len = min(remain, fcnt);
1758 dw_mci_pull_data(host, (void *)(buf + offset), len);
1759 data->bytes_xfered += len;
1764 sg_miter->consumed = offset;
1765 status = mci_readl(host, MINTSTS);
1766 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1767 /* if RXDR is ready, read again */
1768 } while ((status & SDMMC_INT_RXDR) ||
1769 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1772 if (!sg_miter_next(sg_miter))
1774 sg_miter->consumed = 0;
1776 sg_miter_stop(sg_miter);
1780 sg_miter_stop(sg_miter);
1783 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1786 static void dw_mci_write_data_pio(struct dw_mci *host)
1788 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1790 unsigned int offset;
1791 struct mmc_data *data = host->data;
1792 int shift = host->data_shift;
1795 unsigned int fifo_depth = host->fifo_depth;
1796 unsigned int remain, fcnt;
1799 if (!sg_miter_next(sg_miter))
1802 host->sg = sg_miter->piter.sg;
1803 buf = sg_miter->addr;
1804 remain = sg_miter->length;
1808 fcnt = ((fifo_depth -
1809 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1810 << shift) - host->part_buf_count;
1811 len = min(remain, fcnt);
1814 host->push_data(host, (void *)(buf + offset), len);
1815 data->bytes_xfered += len;
1820 sg_miter->consumed = offset;
1821 status = mci_readl(host, MINTSTS);
1822 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1823 } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
1826 if (!sg_miter_next(sg_miter))
1828 sg_miter->consumed = 0;
1830 sg_miter_stop(sg_miter);
1834 sg_miter_stop(sg_miter);
1837 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1840 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1842 if (!host->cmd_status)
1843 host->cmd_status = status;
1847 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848 tasklet_schedule(&host->tasklet);
1851 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1853 struct dw_mci *host = dev_id;
1857 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1860 * DTO fix - version 2.10a and below, and only if internal DMA is configured
1863 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1865 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1866 pending |= SDMMC_INT_DATA_OVER;
1870 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1871 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1872 host->cmd_status = pending;
1874 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1877 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1878 /* if there is an error report DATA_ERROR */
1879 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1880 host->data_status = pending;
1882 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1883 tasklet_schedule(&host->tasklet);
1886 if (pending & SDMMC_INT_DATA_OVER) {
1887 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1888 if (!host->data_status)
1889 host->data_status = pending;
1891 if (host->dir_status == DW_MCI_RECV_STATUS) {
1892 if (host->sg != NULL)
1893 dw_mci_read_data_pio(host, true);
1895 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1896 tasklet_schedule(&host->tasklet);
1899 if (pending & SDMMC_INT_RXDR) {
1900 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1901 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1902 dw_mci_read_data_pio(host, false);
1905 if (pending & SDMMC_INT_TXDR) {
1906 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1907 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1908 dw_mci_write_data_pio(host);
1911 if (pending & SDMMC_INT_CMD_DONE) {
1912 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1913 dw_mci_cmd_interrupt(host, pending);
1916 if (pending & SDMMC_INT_CD) {
1917 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1918 queue_work(host->card_workqueue, &host->card_work);
1921 /* Handle SDIO Interrupts */
1922 for (i = 0; i < host->num_slots; i++) {
1923 struct dw_mci_slot *slot = host->slot[i];
1924 if (pending & SDMMC_INT_SDIO(i)) {
1925 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1926 mmc_signal_sdio_irq(slot->mmc);
1932 #ifdef CONFIG_MMC_DW_IDMAC
1933 /* Handle DMA interrupts */
1934 pending = mci_readl(host, IDSTS);
1935 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1936 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1937 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1938 host->dma_ops->complete(host);
1945 static void dw_mci_work_routine_card(struct work_struct *work)
1947 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1950 for (i = 0; i < host->num_slots; i++) {
1951 struct dw_mci_slot *slot = host->slot[i];
1952 struct mmc_host *mmc = slot->mmc;
1953 struct mmc_request *mrq;
1956 present = dw_mci_get_cd(mmc);
1957 while (present != slot->last_detect_state) {
1958 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1959 present ? "inserted" : "removed");
1961 spin_lock_bh(&host->lock);
1963 /* Card change detected */
1964 slot->last_detect_state = present;
1966 /* Clean up queue if present */
1969 if (mrq == host->mrq) {
1973 switch (host->state) {
1976 case STATE_SENDING_CMD:
1977 mrq->cmd->error = -ENOMEDIUM;
1981 case STATE_SENDING_DATA:
1982 mrq->data->error = -ENOMEDIUM;
1983 dw_mci_stop_dma(host);
1985 case STATE_DATA_BUSY:
1986 case STATE_DATA_ERROR:
1987 if (mrq->data->error == -EINPROGRESS)
1988 mrq->data->error = -ENOMEDIUM;
1990 case STATE_SENDING_STOP:
1992 mrq->stop->error = -ENOMEDIUM;
1996 dw_mci_request_end(host, mrq);
1998 list_del(&slot->queue_node);
1999 mrq->cmd->error = -ENOMEDIUM;
2001 mrq->data->error = -ENOMEDIUM;
2003 mrq->stop->error = -ENOMEDIUM;
2005 spin_unlock(&host->lock);
2006 mmc_request_done(slot->mmc, mrq);
2007 spin_lock(&host->lock);
2011 /* Power down slot */
2015 spin_unlock_bh(&host->lock);
2017 present = dw_mci_get_cd(mmc);
2020 mmc_detect_change(slot->mmc,
2021 msecs_to_jiffies(host->pdata->detect_delay_ms));
2026 /* given a slot id, find out the device node representing that slot */
2027 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2029 struct device_node *np;
2033 if (!dev || !dev->of_node)
2036 for_each_child_of_node(dev->of_node, np) {
2037 addr = of_get_property(np, "reg", &len);
2038 if (!addr || (len < sizeof(int)))
2040 if (be32_to_cpup(addr) == slot)
2046 static struct dw_mci_of_slot_quirks {
2049 } of_slot_quirks[] = {
2051 .quirk = "disable-wp",
2052 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2056 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2058 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2063 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2064 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2065 dev_warn(dev, "Slot quirk %s is deprecated\n",
2066 of_slot_quirks[idx].quirk);
2067 quirks |= of_slot_quirks[idx].id;
2072 #else /* CONFIG_OF */
2073 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2077 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2081 #endif /* CONFIG_OF */
2083 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2085 struct mmc_host *mmc;
2086 struct dw_mci_slot *slot;
2087 const struct dw_mci_drv_data *drv_data = host->drv_data;
2091 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2095 slot = mmc_priv(mmc);
2099 host->slot[id] = slot;
2101 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2103 mmc->ops = &dw_mci_ops;
2104 if (of_property_read_u32_array(host->dev->of_node,
2105 "clock-freq-min-max", freq, 2)) {
2106 mmc->f_min = DW_MCI_FREQ_MIN;
2107 mmc->f_max = DW_MCI_FREQ_MAX;
2109 mmc->f_min = freq[0];
2110 mmc->f_max = freq[1];
2113 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2115 if (host->pdata->caps)
2116 mmc->caps = host->pdata->caps;
2118 if (host->pdata->pm_caps)
2119 mmc->pm_caps = host->pdata->pm_caps;
2121 if (host->dev->of_node) {
2122 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2126 ctrl_id = to_platform_device(host->dev)->id;
2128 if (drv_data && drv_data->caps)
2129 mmc->caps |= drv_data->caps[ctrl_id];
2131 if (host->pdata->caps2)
2132 mmc->caps2 = host->pdata->caps2;
2136 if (host->pdata->blk_settings) {
2137 mmc->max_segs = host->pdata->blk_settings->max_segs;
2138 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2139 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2140 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2141 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2143 /* Useful defaults if platform data is unset. */
2144 #ifdef CONFIG_MMC_DW_IDMAC
2145 mmc->max_segs = host->ring_size;
2146 mmc->max_blk_size = 65536;
2147 mmc->max_blk_count = host->ring_size;
2148 mmc->max_seg_size = 0x1000;
2149 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2152 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2153 mmc->max_blk_count = 512;
2154 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2155 mmc->max_seg_size = mmc->max_req_size;
2156 #endif /* CONFIG_MMC_DW_IDMAC */
2159 if (dw_mci_get_cd(mmc))
2160 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2162 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2164 ret = mmc_add_host(mmc);
2168 #if defined(CONFIG_DEBUG_FS)
2169 dw_mci_init_debugfs(slot);
2172 /* Card initially undetected */
2173 slot->last_detect_state = 0;
2182 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2184 /* Debugfs stuff is cleaned up by mmc core */
2185 mmc_remove_host(slot->mmc);
2186 slot->host->slot[id] = NULL;
2187 mmc_free_host(slot->mmc);
2190 static void dw_mci_init_dma(struct dw_mci *host)
2192 /* Alloc memory for sg translation */
2193 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2194 &host->sg_dma, GFP_KERNEL);
2195 if (!host->sg_cpu) {
2196 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2201 /* Determine which DMA interface to use */
2202 #ifdef CONFIG_MMC_DW_IDMAC
2203 host->dma_ops = &dw_mci_idmac_ops;
2204 dev_info(host->dev, "Using internal DMA controller.\n");
2210 if (host->dma_ops->init && host->dma_ops->start &&
2211 host->dma_ops->stop && host->dma_ops->cleanup) {
2212 if (host->dma_ops->init(host)) {
2213 dev_err(host->dev, "%s: Unable to initialize "
2214 "DMA Controller.\n", __func__);
2218 dev_err(host->dev, "DMA initialization not found.\n");
2226 dev_info(host->dev, "Using PIO mode.\n");
2231 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2233 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2236 ctrl = mci_readl(host, CTRL);
2238 mci_writel(host, CTRL, ctrl);
2240 /* wait till resets clear */
2242 ctrl = mci_readl(host, CTRL);
2243 if (!(ctrl & reset))
2245 } while (time_before(jiffies, timeout));
2248 "Timeout resetting block (ctrl reset %#x)\n",
2254 static bool dw_mci_reset(struct dw_mci *host)
2256 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2260 * Resetting generates a block interrupt, hence setting
2261 * the scatter-gather pointer to NULL.
2264 sg_miter_stop(&host->sg_miter);
2269 flags |= SDMMC_CTRL_DMA_RESET;
2271 if (dw_mci_ctrl_reset(host, flags)) {
2273 * In all cases we clear the RAWINTS register to clear any
2276 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2278 /* if using dma we wait for dma_req to clear */
2279 if (host->use_dma) {
2280 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2283 status = mci_readl(host, STATUS);
2284 if (!(status & SDMMC_STATUS_DMA_REQ))
2287 } while (time_before(jiffies, timeout));
2289 if (status & SDMMC_STATUS_DMA_REQ) {
2291 "%s: Timeout waiting for dma_req to "
2292 "clear during reset\n", __func__);
2296 /* when using DMA next we reset the fifo again */
2297 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2301 /* if the controller reset bit did clear, then set clock regs */
2302 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2303 dev_err(host->dev, "%s: fifo/dma reset bits didn't "
2304 "clear but ciu was reset, doing clock update\n",
2310 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2311 /* It is also recommended that we reset and reprogram idmac */
2312 dw_mci_idmac_reset(host);
2318 /* After a CTRL reset we need to have CIU set clock registers */
2319 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2325 static struct dw_mci_of_quirks {
2330 .quirk = "broken-cd",
2331 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2333 .quirk = "disable-wp",
2334 .id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
2338 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2340 struct dw_mci_board *pdata;
2341 struct device *dev = host->dev;
2342 struct device_node *np = dev->of_node;
2343 const struct dw_mci_drv_data *drv_data = host->drv_data;
2345 u32 clock_frequency;
2347 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2349 dev_err(dev, "could not allocate memory for pdata\n");
2350 return ERR_PTR(-ENOMEM);
2353 /* find out number of slots supported */
2354 if (of_property_read_u32(dev->of_node, "num-slots",
2355 &pdata->num_slots)) {
2356 dev_info(dev, "num-slots property not found, "
2357 "assuming 1 slot is available\n");
2358 pdata->num_slots = 1;
2362 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2363 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2364 pdata->quirks |= of_quirks[idx].id;
2366 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2367 dev_info(dev, "fifo-depth property not found, using "
2368 "value of FIFOTH register as default\n");
2370 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2372 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2373 pdata->bus_hz = clock_frequency;
2375 if (drv_data && drv_data->parse_dt) {
2376 ret = drv_data->parse_dt(host);
2378 return ERR_PTR(ret);
2381 if (of_find_property(np, "supports-highspeed", NULL))
2382 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2387 #else /* CONFIG_OF */
2388 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2390 return ERR_PTR(-EINVAL);
2392 #endif /* CONFIG_OF */
2394 int dw_mci_probe(struct dw_mci *host)
2396 const struct dw_mci_drv_data *drv_data = host->drv_data;
2397 int width, i, ret = 0;
2402 host->pdata = dw_mci_parse_dt(host);
2403 if (IS_ERR(host->pdata)) {
2404 dev_err(host->dev, "platform data not available\n");
2409 if (host->pdata->num_slots > 1) {
2411 "Platform data must supply num_slots.\n");
2415 host->biu_clk = devm_clk_get(host->dev, "biu");
2416 if (IS_ERR(host->biu_clk)) {
2417 dev_dbg(host->dev, "biu clock not available\n");
2419 ret = clk_prepare_enable(host->biu_clk);
2421 dev_err(host->dev, "failed to enable biu clock\n");
2426 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2427 if (IS_ERR(host->ciu_clk)) {
2428 dev_dbg(host->dev, "ciu clock not available\n");
2429 host->bus_hz = host->pdata->bus_hz;
2431 ret = clk_prepare_enable(host->ciu_clk);
2433 dev_err(host->dev, "failed to enable ciu clock\n");
2437 if (host->pdata->bus_hz) {
2438 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2441 "Unable to set bus rate to %uHz\n",
2442 host->pdata->bus_hz);
2444 host->bus_hz = clk_get_rate(host->ciu_clk);
2447 if (!host->bus_hz) {
2449 "Platform data must supply bus speed\n");
2454 if (drv_data && drv_data->init) {
2455 ret = drv_data->init(host);
2458 "implementation specific init failed\n");
2463 if (drv_data && drv_data->setup_clock) {
2464 ret = drv_data->setup_clock(host);
2467 "implementation specific clock setup failed\n");
2472 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2473 if (IS_ERR(host->vmmc)) {
2474 ret = PTR_ERR(host->vmmc);
2475 if (ret == -EPROBE_DEFER)
2478 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2481 ret = regulator_enable(host->vmmc);
2483 if (ret != -EPROBE_DEFER)
2485 "regulator_enable fail: %d\n", ret);
2490 host->quirks = host->pdata->quirks;
2492 spin_lock_init(&host->lock);
2493 INIT_LIST_HEAD(&host->queue);
2496 * Get the host data width - this assumes that HCON has been set with
2497 * the correct values.
2499 i = (mci_readl(host, HCON) >> 7) & 0x7;
2501 host->push_data = dw_mci_push_data16;
2502 host->pull_data = dw_mci_pull_data16;
2504 host->data_shift = 1;
2505 } else if (i == 2) {
2506 host->push_data = dw_mci_push_data64;
2507 host->pull_data = dw_mci_pull_data64;
2509 host->data_shift = 3;
2511 /* Check for a reserved value, and warn if it is */
2513 "HCON reports a reserved host data width!\n"
2514 "Defaulting to 32-bit access.\n");
2515 host->push_data = dw_mci_push_data32;
2516 host->pull_data = dw_mci_pull_data32;
2518 host->data_shift = 2;
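/*
 * HCON[9:7], read above, encodes the host data width: 0 selects the
 * 16-bit push/pull pair (data_shift = 1), 2 selects 64-bit
 * (data_shift = 3), and anything else falls back to 32-bit
 * (data_shift = 2), warning if the value is reserved.
 */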
2521 /* Reset all blocks */
2522 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2525 host->dma_ops = host->pdata->dma_ops;
2526 dw_mci_init_dma(host);
2528 /* Clear the interrupts for the host controller */
2529 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2530 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2532 /* Put in max timeout */
2533 mci_writel(host, TMOUT, 0xFFFFFFFF);
2536 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
2537 * TxMark = fifo_size / 2, DMA size = 8
2539 if (!host->pdata->fifo_depth) {
2541 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2542 * have been overwritten by the bootloader, just like we're
2543 * about to do, so if you know the value for your hardware, you
2544 * should put it in the platform data.
2546 fifo_size = mci_readl(host, FIFOTH);
2547 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2549 fifo_size = host->pdata->fifo_depth;
2551 host->fifo_depth = fifo_size;
2553 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2554 mci_writel(host, FIFOTH, host->fifoth_val);
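/*
 * Illustrative example: for a 64-entry FIFO this programs
 * SDMMC_SET_FIFOTH(0x2, 31, 32) - an MSIZE code of 0x2 (bursts of 8
 * transfers), an RX watermark of 64 / 2 - 1 = 31 and a TX watermark
 * of 64 / 2 = 32.
 */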
2556 /* disable clock to CIU */
2557 mci_writel(host, CLKENA, 0);
2558 mci_writel(host, CLKSRC, 0);
2561 * The data offset changed in the 2.40a spec, so check the
2562 * version ID and set the DATA register offset accordingly.
2564 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2565 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2567 if (host->verid < DW_MMC_240A)
2568 host->data_offset = DATA_OFFSET;
2570 host->data_offset = DATA_240A_OFFSET;
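/*
 * Example (assuming the usual offsets from dw_mmc.h): a VERID
 * register reading 0x5342240a gives verid = 0x240a, so the DATA
 * register is accessed at the 2.40a offset (DATA_240A_OFFSET, 0x200)
 * rather than the older DATA_OFFSET (0x100).
 */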
2572 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2573 host->card_workqueue = alloc_workqueue("dw-mci-card",
2575 if (!host->card_workqueue) {
2579 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2580 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2581 host->irq_flags, "dw-mci", host);
2585 if (host->pdata->num_slots)
2586 host->num_slots = host->pdata->num_slots;
2588 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2591 * Enable interrupts for command done, data over, data empty, card detect,
2592 * receive ready, and errors such as transmit/receive timeout and CRC error
2594 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2595 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2596 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2597 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2598 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2600 dev_info(host->dev, "DW MMC controller at irq %d, "
2601 "%d bit host data width, "
2603 host->irq, width, fifo_size);
2605 /* We need at least one slot to succeed */
2606 for (i = 0; i < host->num_slots; i++) {
2607 ret = dw_mci_init_slot(host, i);
2609 dev_dbg(host->dev, "slot %d init failed\n", i);
2615 dev_info(host->dev, "%d slots initialized\n", init_slots);
2617 dev_dbg(host->dev, "attempted to initialize %d slots, "
2618 "but failed on all\n", host->num_slots);
2622 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2623 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2628 destroy_workqueue(host->card_workqueue);
2631 if (host->use_dma && host->dma_ops->exit)
2632 host->dma_ops->exit(host);
2634 regulator_disable(host->vmmc);
2637 if (!IS_ERR(host->ciu_clk))
2638 clk_disable_unprepare(host->ciu_clk);
2641 if (!IS_ERR(host->biu_clk))
2642 clk_disable_unprepare(host->biu_clk);
2646 EXPORT_SYMBOL(dw_mci_probe);
2648 void dw_mci_remove(struct dw_mci *host)
2652 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2653 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2655 for (i = 0; i < host->num_slots; i++) {
2656 dev_dbg(host->dev, "remove slot %d\n", i);
2658 dw_mci_cleanup_slot(host->slot[i], i);
2661 /* disable clock to CIU */
2662 mci_writel(host, CLKENA, 0);
2663 mci_writel(host, CLKSRC, 0);
2665 destroy_workqueue(host->card_workqueue);
2667 if (host->use_dma && host->dma_ops->exit)
2668 host->dma_ops->exit(host);
2671 regulator_disable(host->vmmc);
2673 if (!IS_ERR(host->ciu_clk))
2674 clk_disable_unprepare(host->ciu_clk);
2676 if (!IS_ERR(host->biu_clk))
2677 clk_disable_unprepare(host->biu_clk);
2679 EXPORT_SYMBOL(dw_mci_remove);
2683 #ifdef CONFIG_PM_SLEEP
2685 * TODO: we should probably disable the clock to the card in the suspend path.
2687 int dw_mci_suspend(struct dw_mci *host)
2690 regulator_disable(host->vmmc);
2694 EXPORT_SYMBOL(dw_mci_suspend);
2696 int dw_mci_resume(struct dw_mci *host)
2701 ret = regulator_enable(host->vmmc);
2704 "failed to enable regulator: %d\n", ret);
2709 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2714 if (host->use_dma && host->dma_ops->init)
2715 host->dma_ops->init(host);
2718 * Restore the initial value of the FIFOTH register,
2719 * and invalidate prev_blksz by zeroing it
2721 mci_writel(host, FIFOTH, host->fifoth_val);
2722 host->prev_blksz = 0;
2724 /* Put in max timeout */
2725 mci_writel(host, TMOUT, 0xFFFFFFFF);
2727 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2728 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2729 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2730 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2731 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2733 for (i = 0; i < host->num_slots; i++) {
2734 struct dw_mci_slot *slot = host->slot[i];
2737 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2738 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2739 dw_mci_setup_bus(slot, true);
2744 EXPORT_SYMBOL(dw_mci_resume);
2745 #endif /* CONFIG_PM_SLEEP */
2747 static int __init dw_mci_init(void)
2749 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2753 static void __exit dw_mci_exit(void)
2757 module_init(dw_mci_init);
2758 module_exit(dw_mci_exit);
2760 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2761 MODULE_AUTHOR("NXP Semiconductor VietNam");
2762 MODULE_AUTHOR("Imagination Technologies Ltd");
2763 MODULE_LICENSE("GPL v2");