/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *            is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *                is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 */
struct variant_data {
        unsigned int            clkreg;
        unsigned int            clkreg_enable;
        unsigned int            datalength_bits;
        unsigned int            fifosize;
        unsigned int            fifohalfsize;
        bool                    sdio;
        bool                    st_clkdiv;
        bool                    blksz_datactrl16;
        u32                     pwrreg_powerup;
        bool                    signal_direction;
};

static struct variant_data variant_arm = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifosize               = 128 * 4,
        .fifohalfsize           = 64 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .datalength_bits        = 16,
        .sdio                   = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

static struct variant_data variant_nomadik = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

static struct variant_data variant_ux500 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

static struct variant_data variant_ux500v2 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .blksz_datactrl16       = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};
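
/*
 * Editor's note: blksz_datactrl16 above changes how mmci_start_data()
 * encodes the block size. ux500v2 writes the byte count itself (e.g. 512)
 * into bits 16..30 of the datactrl register, while the other variants
 * write log2(blksz) (e.g. 9 for a 512-byte block) into bits 4..7.
 */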

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
        if (host->clk_reg != clk) {
                host->clk_reg = clk;
                writel(clk, host->base + MMCICLOCK);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
        if (host->pwr_reg != pwr) {
                host->pwr_reg = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        if (desired) {
                if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (clk + 2);
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
                        clk = host->mclk / (2 * desired) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
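
                        /*
                         * Worked example (editor's note): with mclk =
                         * 100 MHz and desired = 400 kHz, clkdiv =
                         * 100000000 / 800000 - 1 = 124, so cclk =
                         * 100000000 / (2 * 125) = 400 kHz exactly.
                         */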
                }

                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;

        mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        mmc_request_done(host->mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(host->mmc));
        pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;

        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~MCI_IRQ1MASK;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        mmci_set_mask1(host, 0);
        host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;
        const char *rxname, *txname;
        dma_cap_mask_t mask;

        if (!plat || !plat->dma_filter) {
                dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
                return;
        }

        /* initialize pre request cookie */
        host->next_data.cookie = 1;

        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally, however if it
         * is specified but cannot be located, DMA will be disabled.
         */
        if (plat->dma_rx_param) {
                host->dma_rx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_rx_param);
                /* E.g if no DMA hardware is present */
                if (!host->dma_rx_channel)
                        dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
        }

        if (plat->dma_tx_param) {
                host->dma_tx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_tx_param);
                if (!host->dma_tx_channel)
                        dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
        } else {
                host->dma_tx_channel = host->dma_rx_channel;
        }

        if (host->dma_rx_channel)
                rxname = dma_chan_name(host->dma_rx_channel);
        else
                rxname = "none";

        if (host->dma_tx_channel)
                txname = dma_chan_name(host->dma_tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (host->dma_tx_channel) {
                struct device *dev = host->dma_tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (host->dma_rx_channel) {
                struct device *dev = host->dma_rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;

        if (host->dma_rx_channel)
                dma_release_channel(host->dma_rx_channel);
        if (host->dma_tx_channel && plat->dma_tx_param)
                dma_release_channel(host->dma_tx_channel);
        host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct dma_chan *chan = host->dma_current;
        enum dma_data_direction dir;
        u32 status;
        int i;

        /* Wait up to 1ms for the DMA to complete */
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers. On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dmaengine_terminate_all(chan);
                if (!data->error)
                        data->error = -EIO;
        }

        if (data->flags & MMC_DATA_WRITE) {
                dir = DMA_TO_DEVICE;
        } else {
                dir = DMA_FROM_DEVICE;
        }

        if (!data->host_cookie)
                dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up with DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }
}

static void mmci_dma_data_error(struct mmci_host *host)
{
        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
                              struct mmci_host_next *next)
{
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .device_fc = false,
        };
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        enum dma_data_direction buffer_dirn;
        int nr_sg;

        /* Check if next job is already prepared */
        if (data->host_cookie && !next &&
            host->dma_current && host->dma_desc_current)
                return 0;

        if (!next) {
                host->dma_current = NULL;
                host->dma_desc_current = NULL;
        }

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_DEV_TO_MEM;
                buffer_dirn = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                conf.direction = DMA_MEM_TO_DEV;
                buffer_dirn = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        if (nr_sg == 0)
                return -EINVAL;

        dmaengine_slave_config(chan, &conf);
        desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
                                       conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;

        if (next) {
                next->dma_chan = chan;
                next->dma_desc = desc;
        } else {
                host->dma_current = chan;
                host->dma_desc_current = desc;
        }

        return 0;

 unmap_exit:
        dmaengine_terminate_all(chan);
        dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        int ret;
        struct mmc_data *data = host->data;

        ret = mmci_dma_prep_data(host, host->data, NULL);
        if (ret)
                return ret;

        /* Okay, go for it. */
        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
        dmaengine_submit(host->dma_desc_current);
        dma_async_issue_pending(host->dma_current);

        datactrl |= MCI_DPSM_DMAENABLE;

        /* Trigger the DMA transfer */
        writel(datactrl, host->base + MMCIDATACTRL);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire next DMA request. When that happens, MMCI will
         * call mmci_data_end()
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_host_next *next = &host->next_data;

        if (data->host_cookie && data->host_cookie != next->cookie) {
                pr_warning("[%s] invalid cookie: data->host_cookie %d"
                       " host->next_data.cookie %d\n",
                       __func__, data->host_cookie, host->next_data.cookie);
                data->host_cookie = 0;
        }

        if (!data->host_cookie)
                return;

        host->dma_desc_current = next->dma_desc;
        host->dma_current = next->dma_chan;

        next->dma_desc = NULL;
        next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
                             bool is_first_req)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct mmci_host_next *nd = &host->next_data;

        if (!data)
                return;

        if (data->host_cookie) {
                data->host_cookie = 0;
                return;
        }

        /* if config for dma */
        if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
            ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
                if (mmci_dma_prep_data(host, data, nd))
                        data->host_cookie = 0;
                else
                        data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
        }
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
                              int err)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct dma_chan *chan;
        enum dma_data_direction dir;

        if (!data)
                return;

        if (data->flags & MMC_DATA_READ) {
                dir = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                dir = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* if config for dma */
        if (chan) {
                if (err)
                        dmaengine_terminate_all(chan);
                if (data->host_cookie)
                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
                                     data->sg_len, dir);
                mrq->data->host_cookie = 0;
        }
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);
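
        /*
         * Worked example (editor's note): a 100 ms card timeout at
         * cclk = 26 MHz gives clks = 100000000 * 26000000 / 10^9 =
         * 2600000 data timer ticks.
         */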
        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        if (variant->blksz_datactrl16)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
        else
                datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;

        /* The ST Micro variants have a special bit to enable SDIO */
        if (variant->sdio && host->mmc->card)
                if (mmc_card_sdio(host->mmc->card))
                        datactrl |= MCI_ST_DPSM_SDIOEN;

        /*
         * Attempt to use DMA operation mode, if this
         * should fail, fall back to PIO mode
         */
        if (!mmci_dma_start_data(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
            cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                if (dma_inprogress(host))
                        mmci_dma_data_error(host);

                /*
                 * Calculate how far we are into the transfer. Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side. On reads, this
                 * can be as much as a FIFO-worth of data ahead. This
                 * matters for FIFO overruns only.
                 */
                remain = readl(host->base + MMCIDATACNT);
                success = data->blksz * data->blocks - remain;

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status, success);
                if (status & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status & MCI_STARTBITERR) {
                        data->error = -ECOMM;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        else
                                success = 0;
                        data->error = -EIO;
                }
                data->bytes_xfered = round_down(success, data->blksz);
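
                /*
                 * Worked example (editor's note): an error 1600 bytes into
                 * a transfer of 512-byte blocks reports
                 * round_down(1600, 512) = 1536 bytes, i.e. whole blocks only.
                 */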
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
                        mmci_dma_unmap(host, data);
                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if (!cmd->data || cmd->error) {
                if (host->data) {
                        /* Terminate the DMA transfer */
                        if (dma_inprogress(host))
                                mmci_dma_data_error(host);
                        mmci_stop_data(host);
                }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;

        do {
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc). Therefore make sure to always read the last bytes
                 * while only doing full 32-bit reads towards the FIFO.
                 */
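                /*
                 * Worked example (editor's note): count = 6 first does one
                 * full 32-bit read (6 >> 2 = 1 word) and rounds count down
                 * to 4; the next pass sees the remaining 2 bytes, reads one
                 * word into a bounce buffer and memcpy()s just 2 bytes out.
                 */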
                if (unlikely(count & 0x3)) {
                        if (count < 4) {
                                unsigned char buf[4];
                                readsl(base + MMCIFIFO, buf, 1);
                                memcpy(ptr, buf, count);
                        } else {
                                readsl(base + MMCIFIFO, ptr, count >> 2);
                                count &= ~0x3;
                        }
                } else {
                        readsl(base + MMCIFIFO, ptr, count >> 2);
                }

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * The ST Micro variant for SDIO transfer sizes
                 * less than 8 bytes should have clock H/W flow
                 * control disabled.
                 */
                if (variant->sdio &&
                    mmc_card_sdio(host->mmc->card)) {
                        u32 clk;
                        if (count < 8)
                                clk = host->clk_reg & ~variant->clkreg_enable;
                        else
                                clk = host->clk_reg | variant->clkreg_enable;

                        mmci_write_clkreg(host, clk);
                }

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc), and the FIFO only accepts full 32-bit writes.
                 * So compensate by adding +3 on the count, a single
                 * byte becomes a 32bit write, 7 bytes will be two
                 * 32bit writes etc.
                 */
                writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        unsigned long flags;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        local_irq_save(flags);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        local_irq_restore(flags);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);

                if (host->singleirq) {
                        if (status & readl(host->base + MMCIMASK1))
                                mmci_pio_irq(irq, dev_id);

                        status &= ~MCI_IRQ1MASK;
                }

                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                              MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
                              MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
                        mrq->data->blksz);
                mrq->cmd->error = -EINVAL;
                mmc_request_done(mmc, mrq);
                return;
        }

        pm_runtime_get_sync(mmc_dev(mmc));

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

        if (mrq->data)
                mmci_get_next_data(host, mrq->data);

        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        pm_runtime_get_sync(mmc_dev(mmc));

        if (host->plat->ios_handler &&
                host->plat->ios_handler(mmc_dev(mmc), ios))
                        dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (host->vcc)
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
                break;
        case MMC_POWER_UP:
                if (host->vcc) {
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
                        if (ret) {
                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
                                /*
                                 * The .set_ios() function in the mmc_host_ops
                                 * struct returns void, and failing to set the
                                 * power should be rare so we print an error
                                 * and return here.
                                 */
                                goto out;
                        }
                }
                /*
                 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
                 * and instead uses MCI_PWR_ON so apply whatever value is
                 * configured in the variant data.
                 */
                pwr |= variant->pwrreg_powerup;

                break;
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
                /*
                 * The ST Micro variant has some additional bits
                 * indicating signal direction for the signals in
                 * the SD/MMC bus and feedback-clock usage.
                 */
                pwr |= host->plat->sigdir;

                if (ios->bus_width == MMC_BUS_WIDTH_4)
                        pwr &= ~MCI_ST_DATA74DIREN;
                else if (ios->bus_width == MMC_BUS_WIDTH_1)
                        pwr &= (~MCI_ST_DATA74DIREN &
                                ~MCI_ST_DATA31DIREN &
                                ~MCI_ST_DATA2DIREN);
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                if (host->hw_designer != AMBA_VENDOR_ST)
                        pwr |= MCI_ROD;
                else {
                        /*
                         * The ST Micro variant uses the ROD bit for something
                         * else and only has OD (Open Drain).
                         */
                        pwr |= MCI_OD;
                }
        }

        spin_lock_irqsave(&host->lock, flags);

        mmci_set_clkreg(host, ios->clock);
        mmci_write_pwrreg(host, pwr);

        spin_unlock_irqrestore(&host->lock, flags);

 out:
        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);

        if (host->gpio_wp == -ENOSYS)
                return -ENOSYS;

        return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status;

        if (host->gpio_cd == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        } else
                status = !!gpio_get_value_cansleep(host->gpio_cd)
                        ^ plat->cd_invert;

        /*
         * Use positive logic throughout - status is zero for no card,
         * non-zero for card inserted.
         */
        return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;

        mmc_detect_change(host->mmc, msecs_to_jiffies(500));

        return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .pre_req        = mmci_pre_request,
        .post_req       = mmci_post_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmci_get_ro,
        .get_cd         = mmci_get_cd,
};

#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
                                        struct mmci_platform_data *pdata)
{
        int bus_width = 0;

        pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
        pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

        if (of_get_property(np, "cd-inverted", NULL))
                pdata->cd_invert = true;
        else
                pdata->cd_invert = false;

        of_property_read_u32(np, "max-frequency", &pdata->f_max);
        if (!pdata->f_max)
                pr_warn("%s has no 'max-frequency' property\n", np->full_name);

        if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
                pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
        if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
                pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

        of_property_read_u32(np, "bus-width", &bus_width);
        switch (bus_width) {
        case 0:
                /* No bus-width supplied. */
                break;
        case 4:
                pdata->capabilities |= MMC_CAP_4_BIT_DATA;
                break;
        case 8:
                pdata->capabilities |= MMC_CAP_8_BIT_DATA;
                break;
        default:
                pr_warn("%s: Unsupported bus width\n", np->full_name);
        }
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
                                        struct mmci_platform_data *pdata)
{
        return;
}
#endif
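
/*
 * Example device tree node (editor's illustration; the node name, unit
 * address and GPIO specifiers are made up, but the property names are
 * exactly those parsed by mmci_dt_populate_generic_pdata() above):
 *
 *      sdi@80126000 {
 *              compatible = "arm,pl18x", "arm,primecell";
 *              wp-gpios = <&gpio0 4 0>;
 *              cd-gpios = <&gpio0 5 0>;
 *              cd-inverted;
 *              max-frequency = <50000000>;
 *              mmc-cap-sd-highspeed;
 *              bus-width = <4>;
 *      };
 */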

static int __devinit mmci_probe(struct amba_device *dev,
        const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct device_node *np = dev->dev.of_node;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* Must have platform data or Device Tree. */
        if (!plat && !np) {
                dev_err(&dev->dev, "No plat data or DT found\n");
                return -EINVAL;
        }

        if (!plat) {
                plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
                if (!plat)
                        return -ENOMEM;
        }

        if (np)
                mmci_dt_populate_generic_pdata(np, plat);

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        host->gpio_wp = -ENOSYS;
        host->gpio_cd = -ENOSYS;
        host->gpio_cd_irq = -1;

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_prepare(host->clk);
        if (ret)
                goto clk_free;

        ret = clk_enable(host->clk);
        if (ret)
                goto clk_unprep;

        host->plat = plat;
        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this,
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }
        host->phybase = dev->res.start;
        host->base = ioremap(dev->res.start, resource_size(&dev->res));
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }

        mmc->ops = &mmci_ops;
        /*
         * The ARM and ST versions of the block have slightly different
         * clock divider equations which means that the minimum divider
         * differs too.
         */
        if (variant->st_clkdiv)
                mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
        else
                mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
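
        /*
         * Editor's note: these constants follow from the divider equations
         * above with the 8-bit divider at its maximum of 255 - ST:
         * f = mclk / (255 + 2) = mclk / 257; ARM: f = mclk /
         * (2 * (255 + 1)) = mclk / 512.
         */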

        /*
         * If the platform data supplies a maximum operating
         * frequency, this takes precedence. Else, we fall back
         * to using the module parameter, which has a (low)
         * default value in case it is not specified. Either
         * value must not exceed the clock rate into the block,
         * of course.
         */
        if (plat->f_max)
                mmc->f_max = min(host->mclk, plat->f_max);
        else
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
        /* If we're using the regulator framework, try to fetch a regulator */
        host->vcc = regulator_get(&dev->dev, "vmmc");
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
        else {
                int mask = mmc_regulator_get_ocrmask(host->vcc);

                if (mask < 0)
                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
                                mask);
                else {
                        host->mmc->ocr_avail = (u32) mask;
                        if (plat->ocr_mask)
                                dev_warn(&dev->dev,
                                 "Provided ocr_mask/setpower will not be used "
                                 "(using regulator instead)\n");
                }
        }
#endif
        /* Fall back to platform data if no regulator is found */
        if (host->vcc == NULL)
                mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = plat->capabilities;
        mmc->caps2 = plat->capabilities2;

        /*
         * We can do SGIO
         */
        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;

        /*
         * Set the maximum segment size. Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 1 << 11;

        /*
         * Limit the number of blocks transferred so that we don't overflow
         * the maximum request size.
         */
        mmc->max_blk_count = mmc->max_req_size >> 11;
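
        /*
         * Worked example (editor's note): with 24 datalength bits,
         * max_req_size = 2^24 - 1 = 16777215 bytes and max_blk_count =
         * 16777215 >> 11 = 8191 blocks of max_blk_size 2048 bytes.
         */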

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        if (plat->gpio_cd == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto err_gpio_cd;
        }
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_cd);
                if (ret == 0)
                        host->gpio_cd = plat->gpio_cd;
                else if (ret != -ENOSYS)
                        goto err_gpio_cd;

                /*
                 * A gpio pin that will detect cards when inserted and removed
                 * will most likely want to trigger on the edges if it is
                 * 0 when ejected and 1 when inserted (or mutatis mutandis
                 * for the inverted case) so we request triggers on both
                 * edges.
                 */
                ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
                                mmci_cd_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                DRIVER_NAME " (cd)", host);
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
        if (plat->gpio_wp == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto err_gpio_wp;
        }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_wp);
                if (ret == 0)
                        host->gpio_wp = plat->gpio_wp;
                else if (ret != -ENOSYS)
                        goto err_gpio_wp;
        }

        if ((host->plat->status || host->gpio_cd != -ENOSYS)
            && host->gpio_cd_irq < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        if (!dev->irq[1])
                host->singleirq = true;
        else {
                ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
                                  DRIVER_NAME " (pio)", host);
                if (ret)
                        goto irq0_free;
        }

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        pm_runtime_set_autosuspend_delay(&dev->dev, 50);
        pm_runtime_use_autosuspend(&dev->dev);
        pm_runtime_put(&dev->dev);

        mmc_add_host(mmc);

        return 0;

 irq0_free:
        free_irq(dev->irq[0], host);
 unmap:
        if (host->gpio_wp != -ENOSYS)
                gpio_free(host->gpio_wp);
 err_gpio_wp:
        if (host->gpio_cd_irq >= 0)
                free_irq(host->gpio_cd_irq, host);
        if (host->gpio_cd != -ENOSYS)
                gpio_free(host->gpio_cd);
 err_gpio_cd:
        iounmap(host->base);
 clk_disable:
        clk_disable(host->clk);
 clk_unprep:
        clk_unprepare(host->clk);
 clk_free:
        clk_put(host->clk);
 host_free:
        mmc_free_host(mmc);
 rel_regions:
        amba_release_regions(dev);
 out:
        return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                /*
                 * Undo pm_runtime_put() in probe. We use the _sync
                 * version here so that we can access the primecell.
                 */
                pm_runtime_get_sync(&dev->dev);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                free_irq(dev->irq[0], host);
                if (!host->singleirq)
                        free_irq(dev->irq[1], host);

                if (host->gpio_wp != -ENOSYS)
                        gpio_free(host->gpio_wp);
                if (host->gpio_cd_irq >= 0)
                        free_irq(host->gpio_cd_irq, host);
                if (host->gpio_cd != -ENOSYS)
                        gpio_free(host->gpio_cd);

                iounmap(host->base);
                clk_disable(host->clk);
                clk_unprepare(host->clk);
                clk_put(host->clk);

                if (host->vcc)
                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
                regulator_put(host->vcc);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc);
                if (ret == 0) {
                        pm_runtime_get_sync(dev);
                        writel(0, host->base + MMCIMASK0);
                }
        }

        return ret;
}

static int mmci_resume(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);
                pm_runtime_put(dev);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
        {
                .id     = 0x00041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm,
        },
        {
                .id     = 0x01041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo,
        },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
                .data   = &variant_arm,
        },
        /* ST Micro variants */
        {
                .id     = 0x00180180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x10180180,
                .mask   = 0xf0ffffff,
                .data   = &variant_nomadik,
        },
        {
                .id     = 0x00280180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x00480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500,
        },
        {
                .id     = 0x10480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500v2,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
                .pm     = &mmci_dev_pm_ops,
        },
        .probe          = mmci_probe,
        .remove         = __devexit_p(mmci_remove),
        .id_table       = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");