/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};
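
/*
 * 32-bit IDMAC descriptor, used when the host is not configured for
 * 64-bit descriptor addressing.
 */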
struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[2], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[2], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
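
/*
 * Translate an mmc_command into CMDR register flags: stop/abort and
 * wait-for-data handling, response expectations, and the CMD11
 * voltage-switch special case handled below.
 */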
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
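
/*
 * Precompute the stop/abort command used if a data transfer fails:
 * CMD12 for block read/write and tuning commands, or a CCCR ABORT
 * write via CMD52 for SDIO extended transfers.
 */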
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
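
/*
 * Build the IDMAC descriptor chain from the DMA-mapped scatterlist.
 * Each scatterlist entry is split into descriptors of at most
 * DW_MCI_DESC_DATA_LENGTH (4KB); only the last descriptor keeps its
 * completion interrupt enabled and carries the LD (last) flag.
 */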
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	unsigned int desc_len;
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u64 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
							IDMAC_DES0_CH;

				/* Buffer length */
				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des4 = mem_addr & 0xffffffff;
				desc->des5 = mem_addr >> 32;

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc_last->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u32 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
							 IDMAC_DES0_DIC |
							 IDMAC_DES0_CH);

				/* Buffer length */
				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des2 = cpu_to_le32(mem_addr);

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
						 IDMAC_DES0_DIC));
		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb(); /* drain writebuffer */
}
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

	return 0;
}
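
/*
 * Carve one page of descriptors into a ring: forward-link every
 * descriptor to the next, point the tail back at the head with the
 * end-of-ring flag, and program the ring base address and interrupt
 * mask into the controller.
 */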
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_all(host->dms->ch);
}
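
/*
 * External DMA (TRANS_MODE_EDMAC) path: a generic dmaengine channel
 * is configured to burst to/from the data FIFO, with the burst size
 * matched to the MSIZE currently programmed in FIFOTH.
 */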
static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return ret;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}
static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}

		kfree(host->dms);
		host->dms = NULL;
	}
}
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
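
/*
 * Map the scatterlist for DMA, rejecting transfers the controller
 * can't handle (shorter than DW_MCI_DMA_THRESHOLD bytes, or with
 * non-word-aligned buffers or lengths). When called from pre_req
 * (next != 0), the mapping is cached in data->host_cookie.
 */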
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
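
/*
 * Pick the largest burst size (MSIZE) that evenly divides both the
 * block size in FIFO words and the TX watermark headroom, then set
 * the RX/TX watermarks to match. For example, with a 64-word FIFO
 * (tx_wmark = 32) and a 512-byte block on a 32-bit FIFO (128 words),
 * mszs[] is walked downward from 256 until both 128 and 32 divide
 * evenly, i.e. 32: msize index 4, rx_wmark = 31, tx_wmark = 32.
 */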
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should ship this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		/* We can't do DMA */
		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
		return -ENODEV;
	}

	return 0;
}
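
/*
 * Hand a data transfer to the DMA engine, or fall back to PIO through
 * the sg_miter (with RX/TX watermark interrupts re-enabled) if DMA
 * setup is refused.
 */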
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
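
/*
 * Program the card clock. CLKDIV holds half the divider (the divider
 * value is multiplied by 2, with 0 meaning bypass), so with, say,
 * bus_hz = 100 MHz and a requested 400 kHz clock: 100000000 / 400000
 * = 250, div = DIV_ROUND_UP(250, 2) = 125, and the card sees
 * 100 MHz / (2 * 125) = 400 kHz.
 */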
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (mmc->caps & MMC_CAP_NONREMOVABLE))
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a bit spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}
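
/*
 * Request state machine driven by the tasklet:
 *
 *   SENDING_CMD(11) -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 *                            \-> DATA_ERROR -> DATA_BUSY
 *
 * Each state consumes EVENT_* bits set by the interrupt handler; the
 * loop below keeps running until no further transition is possible.
 */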
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
				    (host->dir_status == DW_MCI_RECV_STATUS))
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If data error interrupt comes but data over
				 * interrupt doesn't come within the given time
				 * in reading data state.
				 */
				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
				    (host->dir_status == DW_MCI_RECV_STATUS))
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
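
/*
 * PIO helpers. The FIFO is only accessible in units of the host data
 * width (2, 4 or 8 bytes, per host->data_shift), so transfers whose
 * buffers aren't a multiple of that width stage the leftover bytes in
 * host->part_buf between calls.
 */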
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
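
/*
 * Top-level interrupt handler. MINTSTS holds the masked interrupt status;
 * each source is acknowledged by writing its bit back to RINTSTS before
 * the event is propagated. The CMD11 voltage-switch case is checked first
 * because its status bits would otherwise be mistaken for a command error.
 */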
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
				del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
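
/*
 * Device-tree helpers: each slot may be described by a child node whose
 * "reg" property matches the slot index; properties parsed here apply to
 * that one slot only.
 */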
#ifdef CONFIG_OF
/* given a slot, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
{
	struct device *dev = slot->mmc->parent;
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot->id)
			return np;
	}
	return NULL;
}
static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(slot);

	if (!np)
		return;

	if (of_property_read_bool(np, "disable-wp")) {
		slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
		dev_warn(slot->mmc->parent,
			 "Slot quirk 'disable-wp' is deprecated\n");
	}
}
#else /* CONFIG_OF */
static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
}
#endif /* CONFIG_OF */
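
/*
 * Per-slot bring-up: allocate the mmc_host, pick the clock range (from
 * "clock-freq-min-max" or the driver defaults), merge capabilities from
 * platform data, controller-specific drv_data and the device tree, then
 * size the request limits to match the selected transfer mode before
 * registering the slot with the MMC core.
 */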
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	dw_mci_slot_of_parse(slot);

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear up the ambiguous description in the dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL)))
			goto no_dma;

		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
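
/*
 * The CTRL reset bits are self-clearing: set the requested bits and poll
 * (for up to 500 ms) until the hardware drops them; returning false here
 * means the block never came out of reset.
 */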
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL)) {
		dev_info(dev, "supports-highspeed property is deprecated.\n");
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
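
/*
 * Only unmask the controller's card-detect interrupt when it is actually
 * needed: broken-cd hosts poll instead, and slots with a working CD GPIO
 * get their events through the gpio layer.
 */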
static void dw_mci_enable_cd(struct dw_mci *host)
{
	struct dw_mci_board *brd = host->pdata;
	unsigned long irqflags;
	u32 temp;
	int i;

	/* No need for CD if broken card detection */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		return;

	/* No need for CD if all slots have a non-error GPIO */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
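
/*
 * Probe order matters here: parse platform data / DT, bring up the bus
 * (biu) and card (ciu) clocks, run vendor hooks, derive the FIFO data
 * width from HCON, reset every block, set up DMA and the IRQ handler,
 * and only then create the slots and unmask card detect. The FIFO
 * watermarks default to RX mark = fifo_size / 2 - 1 and TX mark =
 * fifo_size / 2, with a DMA multiple-transaction size of 8.
 */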
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots < 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	host->quirks = host->pdata->quirks;

	if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
		setup_timer(&host->dto_timer,
			    dw_mci_dto_timer, (unsigned long)host);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
	 * TX mark = fifo_size / 2, DMA size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");