2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/sd.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/dw_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
39 #include <linux/of_gpio.h>
40 #include <linux/mmc/slot-gpio.h>
44 /* Common flag combinations */
45 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
46 SDMMC_INT_HTO | SDMMC_INT_SBE | \
48 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
50 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
51 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
52 #define DW_MCI_SEND_STATUS 1
53 #define DW_MCI_RECV_STATUS 2
54 #define DW_MCI_DMA_THRESHOLD 16
56 #define DW_MCI_FREQ_MAX 200000000 /* unit: Hz */
57 #define DW_MCI_FREQ_MIN 400000 /* unit: Hz */
59 #ifdef CONFIG_MMC_DW_IDMAC
60 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
61 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
65 struct idmac_desc_64addr {
66 u32 des0; /* Control Descriptor */
68 u32 des1; /* Reserved */
70 u32 des2; /* Buffer sizes */
71 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
72 ((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))
74 u32 des3; /* Reserved */
76 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1 */
77 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1 */
79 u32 des6; /* Lower 32-bits of Next Descriptor Address */
80 u32 des7; /* Upper 32-bits of Next Descriptor Address */
84 u32 des0; /* Control Descriptor */
85 #define IDMAC_DES0_DIC BIT(1)
86 #define IDMAC_DES0_LD BIT(2)
87 #define IDMAC_DES0_FD BIT(3)
88 #define IDMAC_DES0_CH BIT(4)
89 #define IDMAC_DES0_ER BIT(5)
90 #define IDMAC_DES0_CES BIT(30)
91 #define IDMAC_DES0_OWN BIT(31)
93 u32 des1; /* Buffer sizes */
94 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
95 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
97 u32 des2; /* buffer 1 physical address */
99 u32 des3; /* buffer 2 physical address */
101 #endif /* CONFIG_MMC_DW_IDMAC */
103 static bool dw_mci_reset(struct dw_mci *host);
104 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
106 #if defined(CONFIG_DEBUG_FS)
107 static int dw_mci_req_show(struct seq_file *s, void *v)
109 struct dw_mci_slot *slot = s->private;
110 struct mmc_request *mrq;
111 struct mmc_command *cmd;
112 struct mmc_command *stop;
113 struct mmc_data *data;
115 /* Make sure we get a consistent snapshot */
116 spin_lock_bh(&slot->host->lock);
126 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
127 cmd->opcode, cmd->arg, cmd->flags,
128 cmd->resp[0], cmd->resp[1], cmd->resp[2],
129 cmd->resp[3], cmd->error);
131 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
132 data->bytes_xfered, data->blocks,
133 data->blksz, data->flags, data->error);
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 stop->opcode, stop->arg, stop->flags,
138 stop->resp[0], stop->resp[1], stop->resp[2],
139 stop->resp[3], stop->error);
142 spin_unlock_bh(&slot->host->lock);
147 static int dw_mci_req_open(struct inode *inode, struct file *file)
149 return single_open(file, dw_mci_req_show, inode->i_private);
152 static const struct file_operations dw_mci_req_fops = {
153 .owner = THIS_MODULE,
154 .open = dw_mci_req_open,
157 .release = single_release,
160 static int dw_mci_regs_show(struct seq_file *s, void *v)
162 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
163 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
164 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
165 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
166 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
167 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
172 static int dw_mci_regs_open(struct inode *inode, struct file *file)
174 return single_open(file, dw_mci_regs_show, inode->i_private);
177 static const struct file_operations dw_mci_regs_fops = {
178 .owner = THIS_MODULE,
179 .open = dw_mci_regs_open,
182 .release = single_release,
185 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
187 struct mmc_host *mmc = slot->mmc;
188 struct dw_mci *host = slot->host;
192 root = mmc->debugfs_root;
196 node = debugfs_create_file("regs", S_IRUSR, root, host,
201 node = debugfs_create_file("req", S_IRUSR, root, slot,
206 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
210 node = debugfs_create_x32("pending_events", S_IRUSR, root,
211 (u32 *)&host->pending_events);
215 node = debugfs_create_x32("completed_events", S_IRUSR, root,
216 (u32 *)&host->completed_events);
223 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
225 #endif /* defined(CONFIG_DEBUG_FS) */
227 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
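/*
 * Translate an mmc_command into the CMD register bits expected by the
 * controller: stop/abort handling, response length and CRC flags, data
 * direction, and the special CMD11 voltage-switch handling.
 */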
229 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231 struct mmc_data *data;
232 struct dw_mci_slot *slot = mmc_priv(mmc);
233 struct dw_mci *host = slot->host;
234 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
236 cmd->error = -EINPROGRESS;
240 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
241 cmd->opcode == MMC_GO_IDLE_STATE ||
242 cmd->opcode == MMC_GO_INACTIVE_STATE ||
243 (cmd->opcode == SD_IO_RW_DIRECT &&
244 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
245 cmdr |= SDMMC_CMD_STOP;
246 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
247 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
249 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
252 /* Special bit makes CMD11 not die */
253 cmdr |= SDMMC_CMD_VOLT_SWITCH;
255 /* Change state to continue to handle CMD11 weirdness */
256 WARN_ON(slot->host->state != STATE_SENDING_CMD);
257 slot->host->state = STATE_SENDING_CMD11;
260 * We need to disable low power mode (automatic clock stop)
261 * while doing voltage switch so we don't confuse the card,
262 * since stopping the clock is a specific part of the UHS
263 * voltage change dance.
265 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
266 * unconditionally turned back on in dw_mci_setup_bus() if it's
267 * ever called with a non-zero clock. That shouldn't happen
268 * until the voltage change is all done.
270 clk_en_a = mci_readl(host, CLKENA);
271 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
272 mci_writel(host, CLKENA, clk_en_a);
273 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
274 SDMMC_CMD_PRV_DAT_WAIT, 0);
277 if (cmd->flags & MMC_RSP_PRESENT) {
278 /* We expect a response, so set this bit */
279 cmdr |= SDMMC_CMD_RESP_EXP;
280 if (cmd->flags & MMC_RSP_136)
281 cmdr |= SDMMC_CMD_RESP_LONG;
284 if (cmd->flags & MMC_RSP_CRC)
285 cmdr |= SDMMC_CMD_RESP_CRC;
289 cmdr |= SDMMC_CMD_DAT_EXP;
290 if (data->flags & MMC_DATA_STREAM)
291 cmdr |= SDMMC_CMD_STRM_MODE;
292 if (data->flags & MMC_DATA_WRITE)
293 cmdr |= SDMMC_CMD_DAT_WR;
296 if (drv_data && drv_data->prepare_command)
297 drv_data->prepare_command(slot->host, &cmdr);
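/*
 * Build the stop/abort command (CMD12 for block transfers, CMD52 with the
 * CCCR ABORT bit for SDIO extended I/O) that is issued when a data command
 * needs to be terminated.
 */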
302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
304 struct mmc_command *stop;
310 stop = &host->stop_abort;
312 memset(stop, 0, sizeof(struct mmc_command));
314 if (cmdr == MMC_READ_SINGLE_BLOCK ||
315 cmdr == MMC_READ_MULTIPLE_BLOCK ||
316 cmdr == MMC_WRITE_BLOCK ||
317 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
318 cmdr == MMC_SEND_TUNING_BLOCK ||
319 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
320 stop->opcode = MMC_STOP_TRANSMISSION;
322 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
323 } else if (cmdr == SD_IO_RW_EXTENDED) {
324 stop->opcode = SD_IO_RW_DIRECT;
325 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
326 ((cmd->arg >> 28) & 0x7);
327 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
332 cmdr = stop->opcode | SDMMC_CMD_STOP |
333 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
338 static void dw_mci_start_command(struct dw_mci *host,
339 struct mmc_command *cmd, u32 cmd_flags)
343 "start command: ARGR=0x%08x CMDR=0x%08x\n",
344 cmd->arg, cmd_flags);
346 mci_writel(host, CMDARG, cmd->arg);
349 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
352 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
354 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
355 dw_mci_start_command(host, stop, host->stop_cmdr);
358 /* DMA interface functions */
359 static void dw_mci_stop_dma(struct dw_mci *host)
361 if (host->using_dma) {
362 host->dma_ops->stop(host);
363 host->dma_ops->cleanup(host);
366 /* Data transfer was stopped by the interrupt handler */
367 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
370 static int dw_mci_get_dma_dir(struct mmc_data *data)
372 if (data->flags & MMC_DATA_WRITE)
373 return DMA_TO_DEVICE;
375 return DMA_FROM_DEVICE;
378 #ifdef CONFIG_MMC_DW_IDMAC
379 static void dw_mci_dma_cleanup(struct dw_mci *host)
381 struct mmc_data *data = host->data;
384 if (!data->host_cookie)
385 dma_unmap_sg(host->dev,
388 dw_mci_get_dma_dir(data));
391 static void dw_mci_idmac_reset(struct dw_mci *host)
393 u32 bmod = mci_readl(host, BMOD);
394 /* Software reset of DMA */
395 bmod |= SDMMC_IDMAC_SWRESET;
396 mci_writel(host, BMOD, bmod);
399 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
403 /* Disable and reset the IDMAC interface */
404 temp = mci_readl(host, CTRL);
405 temp &= ~SDMMC_CTRL_USE_IDMAC;
406 temp |= SDMMC_CTRL_DMA_RESET;
407 mci_writel(host, CTRL, temp);
409 /* Stop the IDMAC running */
410 temp = mci_readl(host, BMOD);
411 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
412 temp |= SDMMC_IDMAC_SWRESET;
413 mci_writel(host, BMOD, temp);
416 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
418 struct mmc_data *data = host->data;
420 dev_vdbg(host->dev, "DMA complete\n");
422 host->dma_ops->cleanup(host);
425 * If the card was removed, data will be NULL. No point in trying to
426 * send the stop command or waiting for NBUSY in this case.
429 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
430 tasklet_schedule(&host->tasklet);
434 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
438 if (host->dma_64bit_address == 1) {
439 struct idmac_desc_64addr *desc = host->sg_cpu;
441 for (i = 0; i < sg_len; i++, desc++) {
442 unsigned int length = sg_dma_len(&data->sg[i]);
443 u64 mem_addr = sg_dma_address(&data->sg[i]);
446 * Set the OWN bit and disable interrupts for this
449 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
452 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
454 /* Physical address to DMA to/from */
455 desc->des4 = mem_addr & 0xffffffff;
456 desc->des5 = mem_addr >> 32;
459 /* Set first descriptor */
461 desc->des0 |= IDMAC_DES0_FD;
463 /* Set last descriptor */
464 desc = host->sg_cpu + (i - 1) *
465 sizeof(struct idmac_desc_64addr);
466 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
467 desc->des0 |= IDMAC_DES0_LD;
470 struct idmac_desc *desc = host->sg_cpu;
472 for (i = 0; i < sg_len; i++, desc++) {
473 unsigned int length = sg_dma_len(&data->sg[i]);
474 u32 mem_addr = sg_dma_address(&data->sg[i]);
477 * Set the OWN bit and disable interrupts for this
480 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
483 IDMAC_SET_BUFFER1_SIZE(desc, length);
485 /* Physical address to DMA to/from */
486 desc->des2 = mem_addr;
489 /* Set first descriptor */
491 desc->des0 |= IDMAC_DES0_FD;
493 /* Set last descriptor */
494 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
495 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
496 desc->des0 |= IDMAC_DES0_LD;
502 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
506 dw_mci_translate_sglist(host, host->data, sg_len);
508 /* Make sure to reset DMA in case we did PIO before this */
509 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
510 dw_mci_idmac_reset(host);
512 /* Select IDMAC interface */
513 temp = mci_readl(host, CTRL);
514 temp |= SDMMC_CTRL_USE_IDMAC;
515 mci_writel(host, CTRL, temp);
519 /* Enable the IDMAC */
520 temp = mci_readl(host, BMOD);
521 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
522 mci_writel(host, BMOD, temp);
524 /* Start it running */
525 mci_writel(host, PLDMND, 1);
528 static int dw_mci_idmac_init(struct dw_mci *host)
532 if (host->dma_64bit_address == 1) {
533 struct idmac_desc_64addr *p;
534 /* Number of descriptors in the ring buffer */
535 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
537 /* Forward link the descriptor list */
538 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
540 p->des6 = (host->sg_dma +
541 (sizeof(struct idmac_desc_64addr) *
542 (i + 1))) & 0xffffffff;
544 p->des7 = (u64)(host->sg_dma +
545 (sizeof(struct idmac_desc_64addr) *
547 /* Initialize reserved and buffer size fields to "0" */
553 /* Set the last descriptor as the end-of-ring descriptor */
554 p->des6 = host->sg_dma & 0xffffffff;
555 p->des7 = (u64)host->sg_dma >> 32;
556 p->des0 = IDMAC_DES0_ER;
559 struct idmac_desc *p;
560 /* Number of descriptors in the ring buffer */
561 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
563 /* Forward link the descriptor list */
564 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
565 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
568 /* Set the last descriptor as the end-of-ring descriptor */
569 p->des3 = host->sg_dma;
570 p->des0 = IDMAC_DES0_ER;
573 dw_mci_idmac_reset(host);
575 if (host->dma_64bit_address == 1) {
576 /* Mask out interrupts - get Tx & Rx complete only */
577 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
578 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
579 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
581 /* Set the descriptor base address */
582 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
583 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
586 /* Mask out interrupts - get Tx & Rx complete only */
587 mci_writel(host, IDSTS, IDMAC_INT_CLR);
588 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
589 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
591 /* Set the descriptor base address */
592 mci_writel(host, DBADDR, host->sg_dma);
598 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
599 .init = dw_mci_idmac_init,
600 .start = dw_mci_idmac_start_dma,
601 .stop = dw_mci_idmac_stop_dma,
602 .complete = dw_mci_idmac_complete_dma,
603 .cleanup = dw_mci_dma_cleanup,
605 #endif /* CONFIG_MMC_DW_IDMAC */
607 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
608 struct mmc_data *data,
611 struct scatterlist *sg;
612 unsigned int i, sg_len;
614 if (!next && data->host_cookie)
615 return data->host_cookie;
618 * We don't do DMA on "complex" transfers, i.e. with
619 * non-word-aligned buffers or lengths. Also, we don't bother
620 * with all the DMA setup overhead for short transfers.
622 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
628 for_each_sg(data->sg, sg, data->sg_len, i) {
629 if (sg->offset & 3 || sg->length & 3)
633 sg_len = dma_map_sg(host->dev,
636 dw_mci_get_dma_dir(data));
641 data->host_cookie = sg_len;
646 static void dw_mci_pre_req(struct mmc_host *mmc,
647 struct mmc_request *mrq,
650 struct dw_mci_slot *slot = mmc_priv(mmc);
651 struct mmc_data *data = mrq->data;
653 if (!slot->host->use_dma || !data)
656 if (data->host_cookie) {
657 data->host_cookie = 0;
661 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
662 data->host_cookie = 0;
665 static void dw_mci_post_req(struct mmc_host *mmc,
666 struct mmc_request *mrq,
669 struct dw_mci_slot *slot = mmc_priv(mmc);
670 struct mmc_data *data = mrq->data;
672 if (!slot->host->use_dma || !data)
675 if (data->host_cookie)
676 dma_unmap_sg(slot->host->dev,
679 dw_mci_get_dma_dir(data));
680 data->host_cookie = 0;
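/*
 * Pick the IDMAC burst size (MSIZE) and the RX/TX watermarks so that bursts
 * evenly divide both the block size and the free FIFO space.  Illustrative
 * example (values assumed, not taken from this file): with a 64-entry FIFO,
 * a 32-bit data width and blksz = 512, blksz_depth = 128 and
 * tx_wmark_invers = 32, so the largest burst dividing both is 32, giving
 * msize index 4 and rx_wmark = 31.
 */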
683 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
685 #ifdef CONFIG_MMC_DW_IDMAC
686 unsigned int blksz = data->blksz;
687 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
688 u32 fifo_width = 1 << host->data_shift;
689 u32 blksz_depth = blksz / fifo_width, fifoth_val;
690 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
691 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
693 tx_wmark = (host->fifo_depth) / 2;
694 tx_wmark_invers = host->fifo_depth - tx_wmark;
698 * if blksz is not a multiple of the FIFO width
700 if (blksz % fifo_width) {
707 if (!((blksz_depth % mszs[idx]) ||
708 (tx_wmark_invers % mszs[idx]))) {
710 rx_wmark = mszs[idx] - 1;
715 * If idx is '0', it won't be tried;
716 * thus, the initial values are used.
719 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
720 mci_writel(host, FIFOTH, fifoth_val);
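/*
 * Program the card read threshold (CDTHRCTL) for HS200/SDR104 reads.  The
 * register only exists on core versions 240A and later, so older cores
 * return early without touching it.
 */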
724 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
726 unsigned int blksz = data->blksz;
727 u32 blksz_depth, fifo_depth;
730 WARN_ON(!(data->flags & MMC_DATA_READ));
733 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
734 * in the FIFO region, so we really shouldn't access it).
736 if (host->verid < DW_MMC_240A)
739 if (host->timing != MMC_TIMING_MMC_HS200 &&
740 host->timing != MMC_TIMING_UHS_SDR104)
743 blksz_depth = blksz / (1 << host->data_shift);
744 fifo_depth = host->fifo_depth;
746 if (blksz_depth > fifo_depth)
750 * If (blksz_depth) >= (fifo_depth >> 1), we should have thld_size <= blksz.
751 * If (blksz_depth) < (fifo_depth >> 1), we should have thld_size = blksz.
752 * Currently we just choose blksz.
755 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
759 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
762 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
764 unsigned long irqflags;
770 /* If we don't have a channel, we can't do DMA */
774 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
776 host->dma_ops->stop(host);
783 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
784 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
788 * Decide the MSIZE and RX/TX Watermark.
789 * If the current block size is the same as the previous one,
790 * there is no need to update FIFOTH.
792 if (host->prev_blksz != data->blksz)
793 dw_mci_adjust_fifoth(host, data);
795 /* Enable the DMA interface */
796 temp = mci_readl(host, CTRL);
797 temp |= SDMMC_CTRL_DMA_ENABLE;
798 mci_writel(host, CTRL, temp);
800 /* Disable RX/TX IRQs, let DMA handle it */
801 spin_lock_irqsave(&host->irq_lock, irqflags);
802 temp = mci_readl(host, INTMASK);
803 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
804 mci_writel(host, INTMASK, temp);
805 spin_unlock_irqrestore(&host->irq_lock, irqflags);
807 host->dma_ops->start(host, sg_len);
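/*
 * Set up a data transfer: try DMA first via dw_mci_submit_data_dma(); if
 * DMA cannot be used, fall back to PIO with sg_miter and the RXDR/TXDR
 * interrupts.
 */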
812 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
814 unsigned long irqflags;
817 data->error = -EINPROGRESS;
823 if (data->flags & MMC_DATA_READ) {
824 host->dir_status = DW_MCI_RECV_STATUS;
825 dw_mci_ctrl_rd_thld(host, data);
827 host->dir_status = DW_MCI_SEND_STATUS;
830 if (dw_mci_submit_data_dma(host, data)) {
831 int flags = SG_MITER_ATOMIC;
832 if (host->data->flags & MMC_DATA_READ)
833 flags |= SG_MITER_TO_SG;
835 flags |= SG_MITER_FROM_SG;
837 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
839 host->part_buf_start = 0;
840 host->part_buf_count = 0;
842 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
844 spin_lock_irqsave(&host->irq_lock, irqflags);
845 temp = mci_readl(host, INTMASK);
846 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
847 mci_writel(host, INTMASK, temp);
848 spin_unlock_irqrestore(&host->irq_lock, irqflags);
850 temp = mci_readl(host, CTRL);
851 temp &= ~SDMMC_CTRL_DMA_ENABLE;
852 mci_writel(host, CTRL, temp);
855 * Use the initial fifoth_val for PIO mode.
856 * If the next data transfer may be done in DMA mode,
857 * prev_blksz should be invalidated.
859 mci_writel(host, FIFOTH, host->fifoth_val);
860 host->prev_blksz = 0;
863 * Keep the current block size.
864 * It will be used to decide whether to update
865 * fifoth register next time.
867 host->prev_blksz = data->blksz;
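/*
 * Issue a "register update" command (e.g. SDMMC_CMD_UPD_CLK) directly to the
 * controller and busy-wait up to 500 ms for the start bit to clear.
 */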
871 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
873 struct dw_mci *host = slot->host;
874 unsigned long timeout = jiffies + msecs_to_jiffies(500);
875 unsigned int cmd_status = 0;
877 mci_writel(host, CMDARG, arg);
879 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
881 while (time_before(jiffies, timeout)) {
882 cmd_status = mci_readl(host, CMD);
883 if (!(cmd_status & SDMMC_CMD_START))
886 dev_err(&slot->mmc->class_dev,
887 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
888 cmd, arg, cmd_status);
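/*
 * Program the card clock.  CLKDIV holds half the divider (the controller
 * divides by 2 * CLKDIV; 0 means bypass), and every change must be pushed to
 * the card-interface unit with an UPD_CLK command while the clock is
 * disabled.  Worked example (numbers assumed): bus_hz = 100 MHz and a
 * 400 kHz request give div = 250, rounded to CLKDIV = 125, i.e. an actual
 * clock of 100 MHz / 250 = 400 kHz.
 */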
891 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
893 struct dw_mci *host = slot->host;
894 unsigned int clock = slot->clock;
897 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
899 /* We must continue to set bit 28 in CMD until the change is complete */
900 if (host->state == STATE_WAITING_CMD11_DONE)
901 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
904 mci_writel(host, CLKENA, 0);
905 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
906 } else if (clock != host->current_speed || force_clkinit) {
907 div = host->bus_hz / clock;
908 if (host->bus_hz % clock && host->bus_hz > clock)
910 * move the + 1 after the divide to prevent
911 * over-clocking the card.
915 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
917 if ((clock << div) != slot->__clk_old || force_clkinit)
918 dev_info(&slot->mmc->class_dev,
919 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
920 slot->id, host->bus_hz, clock,
921 div ? ((host->bus_hz / div) >> 1) :
925 mci_writel(host, CLKENA, 0);
926 mci_writel(host, CLKSRC, 0);
929 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
931 /* set clock to desired speed */
932 mci_writel(host, CLKDIV, div);
935 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
937 /* enable clock; only low power if no SDIO */
938 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
939 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
940 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
941 mci_writel(host, CLKENA, clk_en_a);
944 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
946 /* keep the clock value, reflecting the clock divider */
947 slot->__clk_old = clock << div;
950 host->current_speed = clock;
952 /* Set the current slot bus width */
953 mci_writel(host, CTYPE, (slot->ctype << slot->id));
956 static void __dw_mci_start_request(struct dw_mci *host,
957 struct dw_mci_slot *slot,
958 struct mmc_command *cmd)
960 struct mmc_request *mrq;
961 struct mmc_data *data;
966 host->cur_slot = slot;
969 host->pending_events = 0;
970 host->completed_events = 0;
971 host->cmd_status = 0;
972 host->data_status = 0;
973 host->dir_status = 0;
977 mci_writel(host, TMOUT, 0xFFFFFFFF);
978 mci_writel(host, BYTCNT, data->blksz*data->blocks);
979 mci_writel(host, BLKSIZ, data->blksz);
982 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
984 /* this is the first command, send the initialization clock */
985 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
986 cmdflags |= SDMMC_CMD_INIT;
989 dw_mci_submit_data(host, data);
993 dw_mci_start_command(host, cmd, cmdflags);
996 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
998 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1001 static void dw_mci_start_request(struct dw_mci *host,
1002 struct dw_mci_slot *slot)
1004 struct mmc_request *mrq = slot->mrq;
1005 struct mmc_command *cmd;
1007 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1008 __dw_mci_start_request(host, slot, cmd);
1011 /* must be called with host->lock held */
1012 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1013 struct mmc_request *mrq)
1015 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1020 if (host->state == STATE_WAITING_CMD11_DONE) {
1021 dev_warn(&slot->mmc->class_dev,
1022 "Voltage change didn't complete\n");
1024 * this case isn't expected to happen, so we can
1025 * either crash here or just try to continue on
1026 * in the closest possible state
1028 host->state = STATE_IDLE;
1031 if (host->state == STATE_IDLE) {
1032 host->state = STATE_SENDING_CMD;
1033 dw_mci_start_request(host, slot);
1035 list_add_tail(&slot->queue_node, &host->queue);
1039 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1041 struct dw_mci_slot *slot = mmc_priv(mmc);
1042 struct dw_mci *host = slot->host;
1047 * The check for card presence and queueing of the request must be
1048 * atomic, otherwise the card could be removed in between and the
1049 * request wouldn't fail until another card was inserted.
1051 spin_lock_bh(&host->lock);
1053 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1054 spin_unlock_bh(&host->lock);
1055 mrq->cmd->error = -ENOMEDIUM;
1056 mmc_request_done(mmc, mrq);
1060 dw_mci_queue_request(host, slot, mrq);
1062 spin_unlock_bh(&host->lock);
1065 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1067 struct dw_mci_slot *slot = mmc_priv(mmc);
1068 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1072 switch (ios->bus_width) {
1073 case MMC_BUS_WIDTH_4:
1074 slot->ctype = SDMMC_CTYPE_4BIT;
1076 case MMC_BUS_WIDTH_8:
1077 slot->ctype = SDMMC_CTYPE_8BIT;
1080 /* set default 1 bit mode */
1081 slot->ctype = SDMMC_CTYPE_1BIT;
1084 regs = mci_readl(slot->host, UHS_REG);
1087 if (ios->timing == MMC_TIMING_MMC_DDR52)
1088 regs |= ((0x1 << slot->id) << 16);
1090 regs &= ~((0x1 << slot->id) << 16);
1092 mci_writel(slot->host, UHS_REG, regs);
1093 slot->host->timing = ios->timing;
1096 * Use mirror of ios->clock to prevent race with mmc
1097 * core ios update when finding the minimum.
1099 slot->clock = ios->clock;
1101 if (drv_data && drv_data->set_ios)
1102 drv_data->set_ios(slot->host, ios);
1104 /* Slot specific timing and width adjustment */
1105 dw_mci_setup_bus(slot, false);
1107 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1108 slot->host->state = STATE_IDLE;
1110 switch (ios->power_mode) {
1112 if (!IS_ERR(mmc->supply.vmmc)) {
1113 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1116 dev_err(slot->host->dev,
1117 "failed to enable vmmc regulator\n");
1118 /* return if we failed to turn on vmmc */
1122 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1123 regs = mci_readl(slot->host, PWREN);
1124 regs |= (1 << slot->id);
1125 mci_writel(slot->host, PWREN, regs);
1128 if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
1129 ret = regulator_enable(mmc->supply.vqmmc);
1131 dev_err(slot->host->dev,
1132 "failed to enable vqmmc regulator\n");
1134 slot->host->vqmmc_enabled = true;
1138 if (!IS_ERR(mmc->supply.vmmc))
1139 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1141 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
1142 regulator_disable(mmc->supply.vqmmc);
1143 slot->host->vqmmc_enabled = false;
1146 regs = mci_readl(slot->host, PWREN);
1147 regs &= ~(1 << slot->id);
1148 mci_writel(slot->host, PWREN, regs);
1155 static int dw_mci_card_busy(struct mmc_host *mmc)
1157 struct dw_mci_slot *slot = mmc_priv(mmc);
1161 * Check the busy bit which is low when DAT[3:0]
1162 * (the data lines) are 0000
1164 status = mci_readl(slot->host, STATUS);
1166 return !!(status & SDMMC_STATUS_BUSY);
1169 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1171 struct dw_mci_slot *slot = mmc_priv(mmc);
1172 struct dw_mci *host = slot->host;
1174 u32 v18 = SDMMC_UHS_18V << slot->id;
1179 * Program the voltage. Note that some instances of dw_mmc may use
1180 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1181 * does no harm but you need to set the regulator directly. Try both.
1183 uhs = mci_readl(host, UHS_REG);
1184 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1193 if (!IS_ERR(mmc->supply.vqmmc)) {
1194 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
1197 dev_dbg(&mmc->class_dev,
1198 "Regulator set error %d: %d - %d\n",
1199 ret, min_uv, max_uv);
1203 mci_writel(host, UHS_REG, uhs);
1208 static int dw_mci_get_ro(struct mmc_host *mmc)
1211 struct dw_mci_slot *slot = mmc_priv(mmc);
1212 int gpio_ro = mmc_gpio_get_ro(mmc);
1214 /* Use platform get_ro function, else try the on-board write-protect */
1215 if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1216 (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1218 else if (!IS_ERR_VALUE(gpio_ro))
1219 read_only = gpio_ro;
1222 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1224 dev_dbg(&mmc->class_dev, "card is %s\n",
1225 read_only ? "read-only" : "read-write");
1230 static int dw_mci_get_cd(struct mmc_host *mmc)
1233 struct dw_mci_slot *slot = mmc_priv(mmc);
1234 struct dw_mci_board *brd = slot->host->pdata;
1235 struct dw_mci *host = slot->host;
1236 int gpio_cd = mmc_gpio_get_cd(mmc);
1238 /* Use platform get_cd function, else try onboard card detect */
1239 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1241 else if (!IS_ERR_VALUE(gpio_cd))
1244 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1247 spin_lock_bh(&host->lock);
1249 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1250 dev_dbg(&mmc->class_dev, "card is present\n");
1252 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1253 dev_dbg(&mmc->class_dev, "card is not present\n");
1255 spin_unlock_bh(&host->lock);
1260 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1262 struct dw_mci_slot *slot = mmc_priv(mmc);
1263 struct dw_mci *host = slot->host;
1266 * Low power mode will stop the card clock when idle. According to the
1267 * description of the CLKENA register we should disable low power mode
1268 * for SDIO cards if we need SDIO interrupts to work.
1270 if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1271 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1275 clk_en_a_old = mci_readl(host, CLKENA);
1277 if (card->type == MMC_TYPE_SDIO ||
1278 card->type == MMC_TYPE_SD_COMBO) {
1279 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1280 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1282 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1283 clk_en_a = clk_en_a_old | clken_low_pwr;
1286 if (clk_en_a != clk_en_a_old) {
1287 mci_writel(host, CLKENA, clk_en_a);
1288 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1289 SDMMC_CMD_PRV_DAT_WAIT, 0);
1294 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1296 struct dw_mci_slot *slot = mmc_priv(mmc);
1297 struct dw_mci *host = slot->host;
1298 unsigned long irqflags;
1301 spin_lock_irqsave(&host->irq_lock, irqflags);
1303 /* Enable/disable Slot Specific SDIO interrupt */
1304 int_mask = mci_readl(host, INTMASK);
1306 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1308 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1309 mci_writel(host, INTMASK, int_mask);
1311 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1314 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1316 struct dw_mci_slot *slot = mmc_priv(mmc);
1317 struct dw_mci *host = slot->host;
1318 const struct dw_mci_drv_data *drv_data = host->drv_data;
1321 if (drv_data && drv_data->execute_tuning)
1322 err = drv_data->execute_tuning(slot);
1326 static const struct mmc_host_ops dw_mci_ops = {
1327 .request = dw_mci_request,
1328 .pre_req = dw_mci_pre_req,
1329 .post_req = dw_mci_post_req,
1330 .set_ios = dw_mci_set_ios,
1331 .get_ro = dw_mci_get_ro,
1332 .get_cd = dw_mci_get_cd,
1333 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1334 .execute_tuning = dw_mci_execute_tuning,
1335 .card_busy = dw_mci_card_busy,
1336 .start_signal_voltage_switch = dw_mci_switch_voltage,
1337 .init_card = dw_mci_init_card,
1340 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1341 __releases(&host->lock)
1342 __acquires(&host->lock)
1344 struct dw_mci_slot *slot;
1345 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1347 WARN_ON(host->cmd || host->data);
1349 host->cur_slot->mrq = NULL;
1351 if (!list_empty(&host->queue)) {
1352 slot = list_entry(host->queue.next,
1353 struct dw_mci_slot, queue_node);
1354 list_del(&slot->queue_node);
1355 dev_vdbg(host->dev, "list not empty: %s is next\n",
1356 mmc_hostname(slot->mmc));
1357 host->state = STATE_SENDING_CMD;
1358 dw_mci_start_request(host, slot);
1360 dev_vdbg(host->dev, "list empty\n");
1362 if (host->state == STATE_SENDING_CMD11)
1363 host->state = STATE_WAITING_CMD11_DONE;
1365 host->state = STATE_IDLE;
1368 spin_unlock(&host->lock);
1369 mmc_request_done(prev_mmc, mrq);
1370 spin_lock(&host->lock);
1373 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1375 u32 status = host->cmd_status;
1377 host->cmd_status = 0;
1379 /* Read the response from the card (up to 16 bytes) */
1380 if (cmd->flags & MMC_RSP_PRESENT) {
1381 if (cmd->flags & MMC_RSP_136) {
1382 cmd->resp[3] = mci_readl(host, RESP0);
1383 cmd->resp[2] = mci_readl(host, RESP1);
1384 cmd->resp[1] = mci_readl(host, RESP2);
1385 cmd->resp[0] = mci_readl(host, RESP3);
1387 cmd->resp[0] = mci_readl(host, RESP0);
1394 if (status & SDMMC_INT_RTO)
1395 cmd->error = -ETIMEDOUT;
1396 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1397 cmd->error = -EILSEQ;
1398 else if (status & SDMMC_INT_RESP_ERR)
1404 /* newer IP versions need a delay between retries */
1405 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1412 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1414 u32 status = host->data_status;
1416 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1417 if (status & SDMMC_INT_DRTO) {
1418 data->error = -ETIMEDOUT;
1419 } else if (status & SDMMC_INT_DCRC) {
1420 data->error = -EILSEQ;
1421 } else if (status & SDMMC_INT_EBE) {
1422 if (host->dir_status ==
1423 DW_MCI_SEND_STATUS) {
1425 * No data CRC status was returned.
1426 * The number of bytes transferred
1427 * will be exaggerated in PIO mode.
1429 data->bytes_xfered = 0;
1430 data->error = -ETIMEDOUT;
1431 } else if (host->dir_status ==
1432 DW_MCI_RECV_STATUS) {
1436 /* SDMMC_INT_SBE is included */
1440 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1443 * After an error, there may be data lingering
1448 data->bytes_xfered = data->blocks * data->blksz;
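/*
 * Request state machine, run from tasklet context.  It walks
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP (with DATA_ERROR
 * and the CMD11 voltage-switch states as detours), driven by the event bits
 * set from the interrupt handler.
 */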
1455 static void dw_mci_tasklet_func(unsigned long priv)
1457 struct dw_mci *host = (struct dw_mci *)priv;
1458 struct mmc_data *data;
1459 struct mmc_command *cmd;
1460 struct mmc_request *mrq;
1461 enum dw_mci_state state;
1462 enum dw_mci_state prev_state;
1465 spin_lock(&host->lock);
1467 state = host->state;
1476 case STATE_WAITING_CMD11_DONE:
1479 case STATE_SENDING_CMD11:
1480 case STATE_SENDING_CMD:
1481 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1482 &host->pending_events))
1487 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1488 err = dw_mci_command_complete(host, cmd);
1489 if (cmd == mrq->sbc && !err) {
1490 prev_state = state = STATE_SENDING_CMD;
1491 __dw_mci_start_request(host, host->cur_slot,
1496 if (cmd->data && err) {
1497 dw_mci_stop_dma(host);
1498 send_stop_abort(host, data);
1499 state = STATE_SENDING_STOP;
1503 if (!cmd->data || err) {
1504 dw_mci_request_end(host, mrq);
1508 prev_state = state = STATE_SENDING_DATA;
1511 case STATE_SENDING_DATA:
1513 * We could get a data error and never a transfer
1514 * complete so we'd better check for it here.
1516 * Note that we don't really care if we also got a
1517 * transfer complete; stopping the DMA and sending an
1520 if (test_and_clear_bit(EVENT_DATA_ERROR,
1521 &host->pending_events)) {
1522 dw_mci_stop_dma(host);
1523 send_stop_abort(host, data);
1524 state = STATE_DATA_ERROR;
1528 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1529 &host->pending_events))
1532 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1535 * Handle an EVENT_DATA_ERROR that might have shown up
1536 * before the transfer completed. This might not have
1537 * been caught by the check above because the interrupt
1538 * could have gone off between the previous check and
1539 * the check for transfer complete.
1541 * Technically this ought not be needed assuming we
1542 * get a DATA_COMPLETE eventually (we'll notice the
1543 * error and end the request), but it shouldn't hurt.
1545 * This has the advantage of sending the stop command.
1547 if (test_and_clear_bit(EVENT_DATA_ERROR,
1548 &host->pending_events)) {
1549 dw_mci_stop_dma(host);
1550 send_stop_abort(host, data);
1551 state = STATE_DATA_ERROR;
1554 prev_state = state = STATE_DATA_BUSY;
1558 case STATE_DATA_BUSY:
1559 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1560 &host->pending_events))
1564 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1565 err = dw_mci_data_complete(host, data);
1568 if (!data->stop || mrq->sbc) {
1569 if (mrq->sbc && data->stop)
1570 data->stop->error = 0;
1571 dw_mci_request_end(host, mrq);
1575 /* stop command for open-ended transfer */
1577 send_stop_abort(host, data);
1580 * If we don't have a command complete now we'll
1581 * never get one since we just reset everything;
1582 * better end the request.
1584 * If we do have a command complete we'll fall
1585 * through to the SENDING_STOP command and
1586 * everything will be peachy keen.
1588 if (!test_bit(EVENT_CMD_COMPLETE,
1589 &host->pending_events)) {
1591 dw_mci_request_end(host, mrq);
1597 * If err is non-zero, a stop/abort
1598 * command has already been issued.
1600 prev_state = state = STATE_SENDING_STOP;
1604 case STATE_SENDING_STOP:
1605 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1606 &host->pending_events))
1609 /* CMD error in data command */
1610 if (mrq->cmd->error && mrq->data)
1617 dw_mci_command_complete(host, mrq->stop);
1619 host->cmd_status = 0;
1621 dw_mci_request_end(host, mrq);
1624 case STATE_DATA_ERROR:
1625 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1626 &host->pending_events))
1629 state = STATE_DATA_BUSY;
1632 } while (state != prev_state);
1634 host->state = state;
1636 spin_unlock(&host->lock);
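/*
 * PIO helpers.  part_buf collects the bytes left over when a scatterlist
 * segment is not a multiple of the FIFO word size, so that FIFO accesses are
 * always full 16-, 32- or 64-bit words.
 */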
1640 /* push final bytes to part_buf, only use during push */
1641 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1643 memcpy((void *)&host->part_buf, buf, cnt);
1644 host->part_buf_count = cnt;
1647 /* append bytes to part_buf, only use during push */
1648 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1650 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1651 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1652 host->part_buf_count += cnt;
1656 /* pull first bytes from part_buf, only use during pull */
1657 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1659 cnt = min(cnt, (int)host->part_buf_count);
1661 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1663 host->part_buf_count -= cnt;
1664 host->part_buf_start += cnt;
1669 /* pull final bytes from the part_buf, assuming it's just been filled */
1670 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1672 memcpy(buf, &host->part_buf, cnt);
1673 host->part_buf_start = cnt;
1674 host->part_buf_count = (1 << host->data_shift) - cnt;
1677 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1679 struct mmc_data *data = host->data;
1682 /* try and push anything in the part_buf */
1683 if (unlikely(host->part_buf_count)) {
1684 int len = dw_mci_push_part_bytes(host, buf, cnt);
1687 if (host->part_buf_count == 2) {
1688 mci_writew(host, DATA(host->data_offset),
1690 host->part_buf_count = 0;
1693 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1694 if (unlikely((unsigned long)buf & 0x1)) {
1696 u16 aligned_buf[64];
1697 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1698 int items = len >> 1;
1700 /* memcpy from input buffer into aligned buffer */
1701 memcpy(aligned_buf, buf, len);
1704 /* push data from aligned buffer into fifo */
1705 for (i = 0; i < items; ++i)
1706 mci_writew(host, DATA(host->data_offset),
1713 for (; cnt >= 2; cnt -= 2)
1714 mci_writew(host, DATA(host->data_offset), *pdata++);
1717 /* put anything remaining in the part_buf */
1719 dw_mci_set_part_bytes(host, buf, cnt);
1720 /* Push data if we have reached the expected data length */
1721 if ((data->bytes_xfered + init_cnt) ==
1722 (data->blksz * data->blocks))
1723 mci_writew(host, DATA(host->data_offset),
1728 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1730 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1731 if (unlikely((unsigned long)buf & 0x1)) {
1733 /* pull data from fifo into aligned buffer */
1734 u16 aligned_buf[64];
1735 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1736 int items = len >> 1;
1738 for (i = 0; i < items; ++i)
1739 aligned_buf[i] = mci_readw(host,
1740 DATA(host->data_offset));
1741 /* memcpy from aligned buffer into output buffer */
1742 memcpy(buf, aligned_buf, len);
1750 for (; cnt >= 2; cnt -= 2)
1751 *pdata++ = mci_readw(host, DATA(host->data_offset));
1755 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1756 dw_mci_pull_final_bytes(host, buf, cnt);
1760 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1762 struct mmc_data *data = host->data;
1765 /* try and push anything in the part_buf */
1766 if (unlikely(host->part_buf_count)) {
1767 int len = dw_mci_push_part_bytes(host, buf, cnt);
1770 if (host->part_buf_count == 4) {
1771 mci_writel(host, DATA(host->data_offset),
1773 host->part_buf_count = 0;
1776 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1777 if (unlikely((unsigned long)buf & 0x3)) {
1779 u32 aligned_buf[32];
1780 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1781 int items = len >> 2;
1783 /* memcpy from input buffer into aligned buffer */
1784 memcpy(aligned_buf, buf, len);
1787 /* push data from aligned buffer into fifo */
1788 for (i = 0; i < items; ++i)
1789 mci_writel(host, DATA(host->data_offset),
1796 for (; cnt >= 4; cnt -= 4)
1797 mci_writel(host, DATA(host->data_offset), *pdata++);
1800 /* put anything remaining in the part_buf */
1802 dw_mci_set_part_bytes(host, buf, cnt);
1803 /* Push data if we have reached the expected data length */
1804 if ((data->bytes_xfered + init_cnt) ==
1805 (data->blksz * data->blocks))
1806 mci_writel(host, DATA(host->data_offset),
1811 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1813 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1814 if (unlikely((unsigned long)buf & 0x3)) {
1816 /* pull data from fifo into aligned buffer */
1817 u32 aligned_buf[32];
1818 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1819 int items = len >> 2;
1821 for (i = 0; i < items; ++i)
1822 aligned_buf[i] = mci_readl(host,
1823 DATA(host->data_offset));
1824 /* memcpy from aligned buffer into output buffer */
1825 memcpy(buf, aligned_buf, len);
1833 for (; cnt >= 4; cnt -= 4)
1834 *pdata++ = mci_readl(host, DATA(host->data_offset));
1838 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1839 dw_mci_pull_final_bytes(host, buf, cnt);
1843 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1845 struct mmc_data *data = host->data;
1848 /* try and push anything in the part_buf */
1849 if (unlikely(host->part_buf_count)) {
1850 int len = dw_mci_push_part_bytes(host, buf, cnt);
1854 if (host->part_buf_count == 8) {
1855 mci_writeq(host, DATA(host->data_offset),
1857 host->part_buf_count = 0;
1860 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1861 if (unlikely((unsigned long)buf & 0x7)) {
1863 u64 aligned_buf[16];
1864 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1865 int items = len >> 3;
1867 /* memcpy from input buffer into aligned buffer */
1868 memcpy(aligned_buf, buf, len);
1871 /* push data from aligned buffer into fifo */
1872 for (i = 0; i < items; ++i)
1873 mci_writeq(host, DATA(host->data_offset),
1880 for (; cnt >= 8; cnt -= 8)
1881 mci_writeq(host, DATA(host->data_offset), *pdata++);
1884 /* put anything remaining in the part_buf */
1886 dw_mci_set_part_bytes(host, buf, cnt);
1887 /* Push data if we have reached the expected data length */
1888 if ((data->bytes_xfered + init_cnt) ==
1889 (data->blksz * data->blocks))
1890 mci_writeq(host, DATA(host->data_offset),
1895 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1897 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1898 if (unlikely((unsigned long)buf & 0x7)) {
1900 /* pull data from fifo into aligned buffer */
1901 u64 aligned_buf[16];
1902 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1903 int items = len >> 3;
1905 for (i = 0; i < items; ++i)
1906 aligned_buf[i] = mci_readq(host,
1907 DATA(host->data_offset));
1908 /* memcpy from aligned buffer into output buffer */
1909 memcpy(buf, aligned_buf, len);
1917 for (; cnt >= 8; cnt -= 8)
1918 *pdata++ = mci_readq(host, DATA(host->data_offset));
1922 host->part_buf = mci_readq(host, DATA(host->data_offset));
1923 dw_mci_pull_final_bytes(host, buf, cnt);
1927 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1931 /* get remaining partial bytes */
1932 len = dw_mci_pull_part_bytes(host, buf, cnt);
1933 if (unlikely(len == cnt))
1938 /* get the rest of the data */
1939 host->pull_data(host, buf, cnt);
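/*
 * Drain the receive FIFO into the scatterlist in PIO mode.  Called from the
 * interrupt handler on RXDR and, with dto set, on the final DATA_OVER.
 */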
1942 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1944 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1946 unsigned int offset;
1947 struct mmc_data *data = host->data;
1948 int shift = host->data_shift;
1951 unsigned int remain, fcnt;
1954 if (!sg_miter_next(sg_miter))
1957 host->sg = sg_miter->piter.sg;
1958 buf = sg_miter->addr;
1959 remain = sg_miter->length;
1963 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1964 << shift) + host->part_buf_count;
1965 len = min(remain, fcnt);
1968 dw_mci_pull_data(host, (void *)(buf + offset), len);
1969 data->bytes_xfered += len;
1974 sg_miter->consumed = offset;
1975 status = mci_readl(host, MINTSTS);
1976 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1977 /* if RXDR is ready, read again */
1978 } while ((status & SDMMC_INT_RXDR) ||
1979 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1982 if (!sg_miter_next(sg_miter))
1984 sg_miter->consumed = 0;
1986 sg_miter_stop(sg_miter);
1990 sg_miter_stop(sg_miter);
1993 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1996 static void dw_mci_write_data_pio(struct dw_mci *host)
1998 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2000 unsigned int offset;
2001 struct mmc_data *data = host->data;
2002 int shift = host->data_shift;
2005 unsigned int fifo_depth = host->fifo_depth;
2006 unsigned int remain, fcnt;
2009 if (!sg_miter_next(sg_miter))
2012 host->sg = sg_miter->piter.sg;
2013 buf = sg_miter->addr;
2014 remain = sg_miter->length;
2018 fcnt = ((fifo_depth -
2019 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2020 << shift) - host->part_buf_count;
2021 len = min(remain, fcnt);
2024 host->push_data(host, (void *)(buf + offset), len);
2025 data->bytes_xfered += len;
2030 sg_miter->consumed = offset;
2031 status = mci_readl(host, MINTSTS);
2032 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2033 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2036 if (!sg_miter_next(sg_miter))
2038 sg_miter->consumed = 0;
2040 sg_miter_stop(sg_miter);
2044 sg_miter_stop(sg_miter);
2047 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2050 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2052 if (!host->cmd_status)
2053 host->cmd_status = status;
2057 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2058 tasklet_schedule(&host->tasklet);
2061 static void dw_mci_handle_cd(struct dw_mci *host)
2065 for (i = 0; i < host->num_slots; i++) {
2066 struct dw_mci_slot *slot = host->slot[i];
2071 if (slot->mmc->ops->card_event)
2072 slot->mmc->ops->card_event(slot->mmc);
2073 mmc_detect_change(slot->mmc,
2074 msecs_to_jiffies(host->pdata->detect_delay_ms));
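/*
 * Top-level interrupt handler: acknowledges each pending source in RINTSTS,
 * records command/data status for the tasklet, services PIO and SDIO
 * interrupts, and (when IDMAC is used) handles the DMA completion bits.
 */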
2078 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2080 struct dw_mci *host = dev_id;
2084 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2087 * DTO fix - version 2.10a and below, and only if internal DMA
2090 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2092 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2093 pending |= SDMMC_INT_DATA_OVER;
2097 /* Check volt switch first, since it can look like an error */
2098 if ((host->state == STATE_SENDING_CMD11) &&
2099 (pending & SDMMC_INT_VOLT_SWITCH)) {
2100 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2101 pending &= ~SDMMC_INT_VOLT_SWITCH;
2102 dw_mci_cmd_interrupt(host, pending);
2105 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2106 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2107 host->cmd_status = pending;
2109 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2112 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2113 /* if there is an error, report DATA_ERROR */
2114 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2115 host->data_status = pending;
2117 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2118 tasklet_schedule(&host->tasklet);
2121 if (pending & SDMMC_INT_DATA_OVER) {
2122 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2123 if (!host->data_status)
2124 host->data_status = pending;
2126 if (host->dir_status == DW_MCI_RECV_STATUS) {
2127 if (host->sg != NULL)
2128 dw_mci_read_data_pio(host, true);
2130 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2131 tasklet_schedule(&host->tasklet);
2134 if (pending & SDMMC_INT_RXDR) {
2135 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2136 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2137 dw_mci_read_data_pio(host, false);
2140 if (pending & SDMMC_INT_TXDR) {
2141 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2142 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2143 dw_mci_write_data_pio(host);
2146 if (pending & SDMMC_INT_CMD_DONE) {
2147 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2148 dw_mci_cmd_interrupt(host, pending);
2151 if (pending & SDMMC_INT_CD) {
2152 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2153 dw_mci_handle_cd(host);
2156 /* Handle SDIO Interrupts */
2157 for (i = 0; i < host->num_slots; i++) {
2158 struct dw_mci_slot *slot = host->slot[i];
2159 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2160 mci_writel(host, RINTSTS,
2161 SDMMC_INT_SDIO(slot->sdio_id));
2162 mmc_signal_sdio_irq(slot->mmc);
2168 #ifdef CONFIG_MMC_DW_IDMAC
2169 /* Handle DMA interrupts */
2170 if (host->dma_64bit_address == 1) {
2171 pending = mci_readl(host, IDSTS64);
2172 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2173 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2174 SDMMC_IDMAC_INT_RI);
2175 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2176 host->dma_ops->complete(host);
2179 pending = mci_readl(host, IDSTS);
2180 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2181 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2182 SDMMC_IDMAC_INT_RI);
2183 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2184 host->dma_ops->complete(host);
2193 /* given a slot id, find out the device node representing that slot */
2194 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2196 struct device_node *np;
2200 if (!dev || !dev->of_node)
2203 for_each_child_of_node(dev->of_node, np) {
2204 addr = of_get_property(np, "reg", &len);
2205 if (!addr || (len < sizeof(int)))
2207 if (be32_to_cpup(addr) == slot)
2213 static struct dw_mci_of_slot_quirks {
2216 } of_slot_quirks[] = {
2218 .quirk = "disable-wp",
2219 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2223 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2225 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2230 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2231 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2232 dev_warn(dev, "Slot quirk %s is deprecated\n",
2233 of_slot_quirks[idx].quirk);
2234 quirks |= of_slot_quirks[idx].id;
2239 #else /* CONFIG_OF */
2240 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2244 #endif /* CONFIG_OF */
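/*
 * Allocate and register one mmc_host for the given slot, applying platform
 * data, device-tree properties and controller capabilities.
 */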
2246 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2248 struct mmc_host *mmc;
2249 struct dw_mci_slot *slot;
2250 const struct dw_mci_drv_data *drv_data = host->drv_data;
2254 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2258 slot = mmc_priv(mmc);
2260 slot->sdio_id = host->sdio_id0 + id;
2263 host->slot[id] = slot;
2265 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2267 mmc->ops = &dw_mci_ops;
2268 if (of_property_read_u32_array(host->dev->of_node,
2269 "clock-freq-min-max", freq, 2)) {
2270 mmc->f_min = DW_MCI_FREQ_MIN;
2271 mmc->f_max = DW_MCI_FREQ_MAX;
2273 mmc->f_min = freq[0];
2274 mmc->f_max = freq[1];
2277 /* if there are external regulators, get them */
2278 ret = mmc_regulator_get_supply(mmc);
2279 if (ret == -EPROBE_DEFER)
2280 goto err_host_allocated;
2282 if (!mmc->ocr_avail)
2283 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2285 if (host->pdata->caps)
2286 mmc->caps = host->pdata->caps;
2288 if (host->pdata->pm_caps)
2289 mmc->pm_caps = host->pdata->pm_caps;
2291 if (host->dev->of_node) {
2292 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2296 ctrl_id = to_platform_device(host->dev)->id;
2298 if (drv_data && drv_data->caps)
2299 mmc->caps |= drv_data->caps[ctrl_id];
2301 if (host->pdata->caps2)
2302 mmc->caps2 = host->pdata->caps2;
2304 ret = mmc_of_parse(mmc);
2306 goto err_host_allocated;
2308 if (host->pdata->blk_settings) {
2309 mmc->max_segs = host->pdata->blk_settings->max_segs;
2310 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2311 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2312 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2313 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2315 /* Useful defaults if platform data is unset. */
2316 #ifdef CONFIG_MMC_DW_IDMAC
2317 mmc->max_segs = host->ring_size;
2318 mmc->max_blk_size = 65536;
2319 mmc->max_seg_size = 0x1000;
2320 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2321 mmc->max_blk_count = mmc->max_req_size / 512;
2324 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2325 mmc->max_blk_count = 512;
2326 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2327 mmc->max_seg_size = mmc->max_req_size;
2328 #endif /* CONFIG_MMC_DW_IDMAC */
2331 if (dw_mci_get_cd(mmc))
2332 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2334 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2336 ret = mmc_add_host(mmc);
2338 goto err_host_allocated;
2340 #if defined(CONFIG_DEBUG_FS)
2341 dw_mci_init_debugfs(slot);
2351 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2353 /* Debugfs stuff is cleaned up by mmc core */
2354 mmc_remove_host(slot->mmc);
2355 slot->host->slot[id] = NULL;
2356 mmc_free_host(slot->mmc);
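/*
 * Probe the DMA capabilities: read HCON to see whether the IDMAC uses 32- or
 * 64-bit addressing, allocate the descriptor ring, and fall back to PIO if
 * no usable DMA interface is found.
 */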
2359 static void dw_mci_init_dma(struct dw_mci *host)
2362 /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
2363 addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2365 if (addr_config == 1) {
2366 /* host supports IDMAC in 64-bit address mode */
2367 host->dma_64bit_address = 1;
2368 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2369 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2370 dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2372 /* host supports IDMAC in 32-bit address mode */
2373 host->dma_64bit_address = 0;
2374 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2377 /* Alloc memory for sg translation */
2378 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2379 &host->sg_dma, GFP_KERNEL);
2380 if (!host->sg_cpu) {
2381 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2386 /* Determine which DMA interface to use */
2387 #ifdef CONFIG_MMC_DW_IDMAC
2388 host->dma_ops = &dw_mci_idmac_ops;
2389 dev_info(host->dev, "Using internal DMA controller.\n");
2395 if (host->dma_ops->init && host->dma_ops->start &&
2396 host->dma_ops->stop && host->dma_ops->cleanup) {
2397 if (host->dma_ops->init(host)) {
2398 dev_err(host->dev, "%s: Unable to initialize "
2399 "DMA Controller.\n", __func__);
2403 dev_err(host->dev, "DMA initialization not found.\n");
2411 dev_info(host->dev, "Using PIO mode.\n");
2416 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2418 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2421 ctrl = mci_readl(host, CTRL);
2423 mci_writel(host, CTRL, ctrl);
2425 /* wait till resets clear */
2427 ctrl = mci_readl(host, CTRL);
2428 if (!(ctrl & reset))
2430 } while (time_before(jiffies, timeout));
2433 "Timeout resetting block (ctrl reset %#x)\n",
2439 static bool dw_mci_reset(struct dw_mci *host)
2441 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2445 * Resetting generates a block interrupt, hence setting
2446 * the scatter-gather pointer to NULL.
2449 sg_miter_stop(&host->sg_miter);
2454 flags |= SDMMC_CTRL_DMA_RESET;
2456 if (dw_mci_ctrl_reset(host, flags)) {
2458 * In all cases we clear the RAWINTS register to clear any
2461 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2463 /* if using dma we wait for dma_req to clear */
2464 if (host->use_dma) {
2465 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2468 status = mci_readl(host, STATUS);
2469 if (!(status & SDMMC_STATUS_DMA_REQ))
2472 } while (time_before(jiffies, timeout));
2474 if (status & SDMMC_STATUS_DMA_REQ) {
2476 "%s: Timeout waiting for dma_req to "
2477 "clear during reset\n", __func__);
2481 /* when using DMA next we reset the fifo again */
2482 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2486 /* if the controller reset bit did clear, then set clock regs */
2487 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2488 dev_err(host->dev, "%s: fifo/dma reset bits didn't "
2489 "clear but ciu was reset, doing clock update\n",
2495 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2496 /* It is also recommended that we reset and reprogram idmac */
2497 dw_mci_idmac_reset(host);
2503 /* After a CTRL reset we need to have CIU set clock registers */
2504 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
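/*
 * A note on the SDMMC_CMD_UPD_CLK command above: it only makes the CIU latch
 * the clock registers (CLKDIV/CLKSRC/CLKENA); nothing is sent to the card,
 * which is why it is safe to issue right after a controller reset.
 */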
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
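/*
 * For reference, a device-tree node handled by dw_mci_parse_dt() might look
 * roughly like this (illustrative only: the compatible string, register
 * address and clocks come from the platform-specific glue drivers, and
 * bus-width is picked up by mmc_of_parse() rather than here):
 *
 *	mmc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *		bus-width = <4>;
 *	};
 */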
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}
	if (host->pdata->num_slots < 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}
	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}
	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (i == 0) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}
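	/*
	 * In other words, HCON[9:7] encodes the FIFO data width (0 = 16 bit,
	 * 2 = 64 bit, anything else is treated as 32 bit here) and data_shift
	 * is log2 of the bytes moved per FIFO access: a 512-byte block is
	 * 512 >> 2 = 128 accesses on a 32-bit host.
	 */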
	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
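	/*
	 * Worked example, assuming the usual SDMMC_SET_FIFOTH(MSIZE,
	 * RX_WMark, TX_WMark) field layout: with a 32-word FIFO the line
	 * above programs RX_WMark = 32/2 - 1 = 15, TX_WMark = 32/2 = 16 and
	 * MSIZE = 0x2 (bursts of 8), so transfers are paced at roughly
	 * half-FIFO granularity in both directions.
	 */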
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;
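	/*
	 * data_offset selects where the data FIFO is accessed: pre-2.40a
	 * cores expose the DATA register at DATA_OFFSET (0x100), while 2.40a
	 * and newer move the FIFO window to DATA_240A_OFFSET (0x200); the
	 * actual values are defined in dw_mmc.h.
	 */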
	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz with zero.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
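/*
 * dw_mci_suspend()/dw_mci_resume() are not registered here; the platform
 * glue (dw_mmc-pltfm.c, for example) is expected to call them from its own
 * system-sleep dev_pm_ops callbacks.
 */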
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");