/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_do_get_cd(struct sdhci_host *host);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

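/*
 * Card-detect interrupts below are armed for the transition that can actually
 * happen next: if a card is currently present we watch for "card remove",
 * otherwise we watch for "card insert".  Hosts with broken card detection or
 * non-removable cards skip this entirely.
 */
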
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_do_get_cd(host))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

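/*
 * ADMA2 descriptors are written through the 64-bit descriptor layout below;
 * the 16-bit attribute word, the 16-bit length and the low 32 address bits
 * sit at the same offsets in the 32-bit layout, so only addr_hi is
 * conditional on SDHCI_USE_64_BIT_DMA.
 */
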
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

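/*
 * Example of the bounce-buffer math above (assuming the usual 4-byte
 * SDHCI_ADMA2_ALIGN): a segment mapped at 0x1003 with length 16 gives
 * offset = (4 - (0x1003 & 3)) & 3 = 1, so one byte is staged in the align
 * buffer and described by its own descriptor, and the remaining 15 bytes
 * starting at 0x1004 are described directly by the next descriptor.
 */
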
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

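/*
 * Worked example for the loop above: with host->timeout_clk = 50000 (kHz),
 * the smallest encodable timeout is (1 << 13) * 1000 / 50000 ~= 163 us.
 * For a 100 ms target the value doubles ten times (163 us -> ~167 ms), so
 * count = 10 and the controller is programmed for 2^(13 + 10) timeout clocks.
 */
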
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

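/*
 * Note on the block size write above: SDHCI_MAKE_BLKSZ() packs the SDMA
 * buffer boundary (SDHCI_DEFAULT_BOUNDARY_ARG, i.e. the 512 KiB default)
 * into bits 14:12 and the transfer block size into bits 11:0 of the Block
 * Size register, which is why both values go through a single 16-bit write.
 */
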
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

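/*
 * Summary of the Auto-CMD selection above: open-ended multi-block requests
 * (no CMD23/sbc) get Auto-CMD12 so the controller issues the stop command
 * itself, except for SDIO CMD53 which must not be stopped that way, while
 * requests carrying an sbc use Auto-CMD23 with the block count preloaded
 * into SDHCI_ARGUMENT2.
 */
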
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

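/*
 * Note on the inhibit handling above: Command Inhibit (CMD) must be clear
 * before any command is written, and Command Inhibit (DAT) is additionally
 * required for commands that carry data or signal busy - with the exception
 * of the stop command, which may legitimately be sent while DAT is busy.
 */
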
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}

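/*
 * The shifting above exists because the controller strips the CRC from a
 * long (136-bit) response: each 32-bit word read from the RESPONSE registers
 * is shifted left by 8 and the top byte of the adjacent register is OR-ed in
 * to rebuild the four response words the MMC core expects in resp[0..3].
 */
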
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

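/*
 * Divider example for the SDHCI 3.00 path above: with a 200 MHz base clock
 * and a 50 MHz request, the search stops at div = 4 (200 / 4 <= 50), so
 * real_div = 4 gives an actual_clock of 50 MHz and the value written to the
 * register is div >> 1 = 2, split across the low and high divider fields.
 */
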
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		udelay(100);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_do_set_ios(host, ios);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot gpio detect. If defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	return sdhci_do_get_cd(host);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

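/*
 * For hosts with SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect switch is
 * sampled SAMPLE_COUNT times above and the card is reported read-only only
 * once a majority of the samples agree, which filters out a bouncing signal.
 */
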
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	return sdhci_do_get_ro(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->version < SDHCI_SPEC_300)
		return 0;

	return sdhci_do_start_signal_voltage_switch(host, ios);
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.data = NULL;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	return err;
}

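/*
 * Shape of the tuning procedure above: Execute Tuning is set in Host
 * Control 2, then CMD19/CMD21 is issued repeatedly (at most MAX_TUNING_LOOP
 * times) while the controller samples the returned tuning block; the loop
 * ends when the controller clears Execute Tuning, and Tuned Clock indicates
 * whether a usable sampling point was found.
 */
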
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    mmc_hostname(host->mmc), dmastart,
			    host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}
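
		/*
		 * Worked example of the boundary arithmetic above, assuming
		 * the usual SDHCI_DEFAULT_BOUNDARY_SIZE of 512 KiB (0x80000):
		 * with dmastart = 0x10000000 and 0x30000 bytes transferred,
		 * dmanow = 0x10030000 is rounded up to the next boundary,
		 * 0x10080000, and that address is written back so the
		 * controller resumes the transfer from there.
		 */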
		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
		    mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
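
/*
 * Note on the handler split: sdhci_irq() runs in hard interrupt context and
 * only acknowledges and classifies status bits; card-detect changes and SDIO
 * card interrupts are latched into host->thread_isr and handled from
 * sdhci_thread_irq() once IRQ_WAKE_THREAD is returned.
 */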
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
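
/*
 * A minimal sketch of how a platform glue driver might wire the two helpers
 * above into its own dev_pm_ops (hypothetical example, not part of this
 * file; real glue drivers typically add their own clock handling around
 * these calls):
 *
 *	static int my_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int my_sdhci_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_sdhci_pm_ops,
 *				 my_sdhci_suspend, my_sdhci_resume);
 */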
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
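
/*
 * Typical lifecycle for a glue driver using the helpers exported from this
 * file (illustrative sketch only; names prefixed my_ are hypothetical):
 *
 *	probe:   host = sdhci_alloc_host(dev, sizeof(struct my_priv));
 *	         ... fill in host->ioaddr, host->irq, host->ops, quirks ...
 *	         ret = sdhci_add_host(host);
 *	remove:  sdhci_remove_host(host, my_controller_is_dead);
 *	         sdhci_free_host(host);
 *
 * sdhci_alloc_host() returns an ERR_PTR() on failure, so callers should
 * check it with IS_ERR() before use.
 */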
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}
	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (caps[0] & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
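
	/*
	 * At this point the DMA mode has been settled: quirks can force or
	 * forbid SDMA/ADMA regardless of the capability bits, a failed DMA
	 * mask or ->enable_dma() call drops the host back to PIO, and a
	 * 64-bit capable host keeps only ADMA since SDMA addressing is
	 * limited to 32 bits here.
	 */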
	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
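
	/*
	 * The base-clock capability field is expressed in MHz, hence the
	 * multiplication above: a raw field value of 100, for example,
	 * yields host->max_clk = 100000000 (100 MHz). A value of 0 means
	 * the controller does not advertise its base clock, which is
	 * handled just below.
	 */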
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
		mmc->f_max = max_clk;
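
	/*
	 * Worked example for the programmable-clock case above: with a
	 * 200 MHz base clock and an effective clock multiplier of 2, the
	 * minimum frequency becomes (200000000 * 2) / 1024 = ~390 kHz and
	 * f_max is raised to 400 MHz, since in programmable clock mode the
	 * divider operates on the multiplied clock.
	 */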
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
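
	/*
	 * Assuming timeout_clk is kept in kHz here (SDHCI_TIMEOUT_CLK_UNIT
	 * means the capability field is in MHz, hence the *1000 above),
	 * dividing the maximum timeout count by it gives max_busy_timeout
	 * in milliseconds: e.g. 1 << 27 cycles at a 1 MHz (1000 kHz)
	 * timeout clock is about 134217 ms.
	 */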
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
				     SDHCI_SUPPORT_SDR50 |
				     SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			     SDHCI_SUPPORT_DDR50);
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;
	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;
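
	/*
	 * Example for the re-tuning count conversion above: a raw capability
	 * field value of n = 4 becomes 1 << (4 - 1) = 8, i.e. an 8-second
	 * re-tuning interval per the 2^(n-1) rule noted in the comment; a
	 * raw value of 0 leaves re-tuning disabled.
	 */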
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
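
	/*
	 * Example of the 4x encoding described above (assuming
	 * SDHCI_MAX_CURRENT_MULTIPLIER is 4): a regulator limited to
	 * 800000 uA becomes 800 mA, encoded as 800 / 4 = 200 in each
	 * per-voltage field; the decoding below multiplies by 4 again to
	 * recover the mA value reported in mmc->max_current_*.
	 */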
	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);
	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
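
	/*
	 * The capability field is an exponent on 512 bytes, so the line above
	 * maps a raw value of 0/1/2 to 512/1024/2048-byte blocks (and the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 path simply forces the value 2).
	 * Raw values of 3 or more are reserved, which is why they were
	 * rejected with a warning just before.
	 */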
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif
	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");