/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/sizes.h>

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *            is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *                is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @datactrl_mask_ddrmode: ddr mode mask in datactrl register
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on dat0 is supported
 * @pwrreg_nopower: bits in MMCIPOWER don't control ext. power supply
 */
struct variant_data {
        unsigned int            clkreg;
        unsigned int            clkreg_enable;
        unsigned int            datalength_bits;
        unsigned int            fifosize;
        unsigned int            fifohalfsize;
        unsigned int            datactrl_mask_ddrmode;
        bool                    sdio;
        bool                    st_clkdiv;
        bool                    blksz_datactrl16;
        bool                    blksz_datactrl4;
        u32                     pwrreg_powerup;
        bool                    signal_direction;
        bool                    pwrreg_clkgate;
        bool                    busy_detect;
        bool                    pwrreg_nopower;
};
static struct variant_data variant_arm = {
        .fifohalfsize           = 8 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifohalfsize           = 64 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
        .fifohalfsize           = 64 * 4,
        .clkreg_enable          = MCI_ARM_HWFCEN,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
};

static struct variant_data variant_nomadik = {
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .datalength_bits        = 24,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
};

static struct variant_data variant_ux500 = {
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
};

static struct variant_data variant_ux500v2 = {
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datactrl_mask_ddrmode  = MCI_ST_DPSM_DDRMODE,
        .datalength_bits        = 24,
        .blksz_datactrl16       = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;
        int busy = 0;

        pm_runtime_get_sync(mmc_dev(mmc));

        spin_lock_irqsave(&host->lock, flags);
        if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
                busy = 1;
        spin_unlock_irqrestore(&host->lock, flags);

        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));

        return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
                              struct mmc_data *data)
{
        if (!data)
                return 0;

        if (!is_power_of_2(data->blksz)) {
                dev_err(mmc_dev(host->mmc),
                        "unsupported block size (%d bytes)\n", data->blksz);
                return -EINVAL;
        }

        return 0;
}
static void mmci_reg_delay(struct mmci_host *host)
{
        /*
         * According to the spec, at least three feedback clock cycles
         * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
         * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
         * Worst delay time during card init is at 100 kHz => 30 us.
         * Worst delay time when up and running is at 25 MHz => 120 ns.
         */
        if (host->cclk < 25000000)
                udelay(30);
        else
                ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
        if (host->clk_reg != clk) {
                host->clk_reg = clk;
                writel(clk, host->base + MMCICLOCK);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
        if (host->pwr_reg != pwr) {
                host->pwr_reg = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
        /* Keep ST Micro busy mode if enabled */
        datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

        if (host->datactrl_reg != datactrl) {
                host->datactrl_reg = datactrl;
                writel(datactrl, host->base + MMCIDATACTRL);
        }
}
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        /* Make sure cclk reflects the current calculated clock */
        host->cclk = 0;

        if (desired) {
                if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        host->cclk = host->mclk / (clk + 2);
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
                        clk = host->mclk / (2 * desired) - 1;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }
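                /*
                 * Illustrative worked example (not from the original source):
                 * with mclk = 100 MHz and desired = 400 kHz the ST path gives
                 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
                 * cclk = 100000000 / (248 + 2) = 400000 Hz, while the PL180
                 * path gives clkdiv = 100000000 / (2 * 400000) - 1 = 124 and
                 * cclk = 100000000 / (2 * (124 + 1)) = 400000 Hz.
                 */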
                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        /* Set actual clock for debug */
        host->mmc->actual_clock = host->cclk;

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;

        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
            host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
                clk |= MCI_ST_UX500_NEG_EDGE;

        mmci_write_clkreg(host, clk);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        mmc_request_done(host->mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(host->mmc));
        pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;

        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~MCI_IRQ1MASK;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
        mmci_write_datactrlreg(host, 0);
        mmci_set_mask1(host, 0);
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
        const char *rxname, *txname;
        dma_cap_mask_t mask;

        host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
        host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

        /* initialize pre request cookie */
        host->next_data.cookie = 1;

        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally, however if it is
         * specified but cannot be located, DMA will be disabled.
         */
        if (host->dma_rx_channel && !host->dma_tx_channel)
                host->dma_tx_channel = host->dma_rx_channel;

        if (host->dma_rx_channel)
                rxname = dma_chan_name(host->dma_rx_channel);
        else
                rxname = "none";

        if (host->dma_tx_channel)
                txname = dma_chan_name(host->dma_tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (host->dma_tx_channel) {
                struct device *dev = host->dma_tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (host->dma_rx_channel) {
                struct device *dev = host->dma_rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
}
/*
 * This is only used at probe and remove time, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
        if (host->dma_rx_channel)
                dma_release_channel(host->dma_rx_channel);
        if (host->dma_tx_channel)
                dma_release_channel(host->dma_tx_channel);
        host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(host->dma_current);
        host->dma_current = NULL;
        host->dma_desc_current = NULL;
        host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct dma_chan *chan;
        enum dma_data_direction dir;

        if (data->flags & MMC_DATA_READ) {
                dir = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                dir = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
        u32 status;
        int i;

        /* Wait up to 1ms for the DMA to complete */
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers.  On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                mmci_dma_data_error(host);
        }

        if (!data->host_cookie)
                mmci_dma_unmap(host, data);

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up with DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }

        host->dma_current = NULL;
        host->dma_desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
                                struct dma_chan **dma_chan,
                                struct dma_async_tx_descriptor **dma_desc)
{
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
        };
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        enum dma_data_direction buffer_dirn;
        int nr_sg;

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_DEV_TO_MEM;
                buffer_dirn = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                conf.direction = DMA_MEM_TO_DEV;
                buffer_dirn = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        if (nr_sg == 0)
                return -EINVAL;

        dmaengine_slave_config(chan, &conf);
        desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
                                       conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;

        *dma_chan = chan;
        *dma_desc = desc;

        return 0;

 unmap_exit:
        dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
}
static inline int mmci_dma_prep_data(struct mmci_host *host,
                                     struct mmc_data *data)
{
        /* Check if next job is already prepared. */
        if (host->dma_current && host->dma_desc_current)
                return 0;

        /* No job was prepared, thus do it now. */
        return __mmci_dma_prep_data(host, data, &host->dma_current,
                                    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
                                     struct mmc_data *data)
{
        struct mmci_host_next *nd = &host->next_data;
        return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        int ret;
        struct mmc_data *data = host->data;

        ret = mmci_dma_prep_data(host, host->data);
        if (ret)
                return ret;

        /* Okay, go for it. */
        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
        dmaengine_submit(host->dma_desc_current);
        dma_async_issue_pending(host->dma_current);

        datactrl |= MCI_DPSM_DMAENABLE;

        /* Trigger the DMA transfer */
        mmci_write_datactrlreg(host, datactrl);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire next DMA request. When that happens, MMCI will
         * call mmci_data_end()
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_host_next *next = &host->next_data;

        WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
        WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

        host->dma_desc_current = next->dma_desc;
        host->dma_current = next->dma_chan;
        next->dma_desc = NULL;
        next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
                             bool is_first_req)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct mmci_host_next *nd = &host->next_data;

        if (!data)
                return;

        BUG_ON(data->host_cookie);

        if (mmci_validate_data(host, data))
                return;

        if (!mmci_dma_prep_next(host, data))
                data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
                              int err)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data || !data->host_cookie)
                return;

        mmci_dma_unmap(host, data);

        if (err) {
                struct mmci_host_next *next = &host->next_data;
                struct dma_chan *chan;
                if (data->flags & MMC_DATA_READ)
                        chan = host->dma_rx_channel;
                else
                        chan = host->dma_tx_channel;
                dmaengine_terminate_all(chan);

                next->dma_desc = NULL;
                next->dma_chan = NULL;
        }
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
                                     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, NSEC_PER_SEC);

        timeout = data->timeout_clks + (unsigned int)clks;
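        /*
         * Worked example (illustrative only): a 100 ms data timeout
         * (timeout_ns = 100000000) at cclk = 26 MHz gives
         * clks = 100000000 * 26000000 / NSEC_PER_SEC = 2600000, so
         * MMCIDATATIMER is programmed with timeout_clks + 2600000.
         */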
        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        if (variant->blksz_datactrl16)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
        else if (variant->blksz_datactrl4)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
        else
                datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
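        /*
         * Illustrative example (not in the original source): for a 512-byte
         * block, blksz_bits = ffs(512) - 1 = 9, so the legacy encoding above
         * programs 9 << 4 into datactrl, while the blksz_datactrl16 and
         * blksz_datactrl4 variants program the raw block size (512) at bit 16
         * or bit 4 instead.
         */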
        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;

        /* The ST Micro variants have a special bit to enable SDIO */
        if (variant->sdio && host->mmc->card)
                if (mmc_card_sdio(host->mmc->card)) {
                        unsigned int clk;

                        /*
                         * The ST Micro variants have a special bit
                         * to enable SDIO.
                         */
                        datactrl |= MCI_ST_DPSM_SDIOEN;

                        /*
                         * The ST Micro variant for SDIO small write transfers
                         * needs to have clock H/W flow control disabled,
                         * otherwise the transfer will not start. The threshold
                         * depends on the rate of MCLK.
                         */
                        if (data->flags & MMC_DATA_WRITE &&
                            (host->size < 8 ||
                             (host->size <= 8 && host->mclk > 50000000)))
                                clk = host->clk_reg & ~variant->clkreg_enable;
                        else
                                clk = host->clk_reg | variant->clkreg_enable;

                        mmci_write_clkreg(host, clk);
                }

        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
            host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
                datactrl |= variant->datactrl_mask_ddrmode;

        /*
         * Attempt to use DMA operation mode, if this
         * should fail, fall back to PIO mode
         */
        if (!mmci_dma_start_data(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        mmci_write_datactrlreg(host, datactrl);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
                cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                mmci_reg_delay(host);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                if (dma_inprogress(host)) {
                        mmci_dma_data_error(host);
                        mmci_dma_unmap(host, data);
                }

                /*
                 * Calculate how far we are into the transfer.  Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side. On reads, this
                 * can be as much as a FIFO-worth of data ahead.  This
                 * matters for FIFO overruns only.
                 */
                remain = readl(host->base + MMCIDATACNT);
                success = data->blksz * data->blocks - remain;

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status, success);
                if (status & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status & MCI_STARTBITERR) {
                        data->error = -ECOMM;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        data->error = -EIO;
                }
                data->bytes_xfered = round_down(success, data->blksz);
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
                        mmci_dma_finalize(host, data);
                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop || host->mrq->sbc) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;
        bool sbc = (cmd == host->mrq->sbc);
        bool busy_resp = host->variant->busy_detect &&
                         (cmd->flags & MMC_RSP_BUSY);

        /* Check if we need to wait for busy completion. */
        if (host->busy_status && (status & MCI_ST_CARDBUSY))
                return;

        /* Enable busy completion if needed and supported. */
        if (!host->busy_status && busy_resp &&
            !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
            (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
                writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
                       base + MMCIMASK0);
                host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
                return;
        }

        /* At busy completion, mask the IRQ and complete the request. */
        if (host->busy_status) {
                writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
                       base + MMCIMASK0);
                host->busy_status = 0;
        }

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if ((!sbc && !cmd->data) || cmd->error) {
                if (host->data) {
                        /* Terminate the DMA transfer */
                        if (dma_inprogress(host)) {
                                mmci_dma_data_error(host);
                                mmci_dma_unmap(host, host->data);
                        }
                        mmci_stop_data(host);
                }
                mmci_request_end(host, host->mrq);
        } else if (sbc) {
                mmci_start_command(host, host->mrq->cmd, 0);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;

        do {
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc). Therefore make sure to always read the last bytes
                 * while only doing full 32-bit reads towards the FIFO.
                 */
                if (unlikely(count & 0x3)) {
                        if (count < 4) {
                                unsigned char buf[4];
                                ioread32_rep(base + MMCIFIFO, buf, 1);
                                memcpy(ptr, buf, count);
                        } else {
                                ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
                                count &= ~0x3;
                        }
                } else {
                        ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
                }
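                /*
                 * Example (illustrative): an SDIO transfer with count = 3
                 * takes the small-read path above: one full 32-bit word is
                 * read from the FIFO into buf[] and only the 3 valid bytes
                 * are copied out, so the FIFO is always drained in whole
                 * words.
                 */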
                ptr += count;
                remain -= count;
                host_remain -= count;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc), and the FIFO only accepts full 32-bit writes.
                 * So compensate by adding +3 on the count, a single
                 * byte becomes a 32-bit write, 7 bytes will be two
                 * 32-bit writes, etc.
                 */
                iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
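                /*
                 * Example (illustrative): count = 1 gives (1 + 3) >> 2 = 1
                 * word written, and count = 7 gives (7 + 3) >> 2 = 2 words,
                 * matching the comment above.
                 */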
                ptr += count;
                remain -= count;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        unsigned long flags;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        local_irq_save(flags);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        local_irq_restore(flags);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);

                if (host->singleirq) {
                        if (status & readl(host->base + MMCIMASK1))
                                mmci_pio_irq(irq, dev_id);

                        status &= ~MCI_IRQ1MASK;
                }

                /*
                 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
                 * enabled) since the HW seems to be triggering the IRQ on both
                 * edges while monitoring DAT0 for busy completion.
                 */
                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                cmd = host->cmd;
                if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
                        MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                              MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
                              MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                /* Don't poll for busy completion in irq context. */
                if (host->busy_status)
                        status &= ~MCI_ST_CARDBUSY;

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        mrq->cmd->error = mmci_validate_data(host, mrq->data);
        if (mrq->cmd->error) {
                mmc_request_done(mmc, mrq);
                return;
        }

        pm_runtime_get_sync(mmc_dev(mmc));

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

        if (mrq->data)
                mmci_get_next_data(host, mrq->data);

        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        if (mrq->sbc)
                mmci_start_command(host, mrq->sbc, 0);
        else
                mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        pm_runtime_get_sync(mmc_dev(mmc));

        if (host->plat->ios_handler &&
            host->plat->ios_handler(mmc_dev(mmc), ios))
                dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

                if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
                        regulator_disable(mmc->supply.vqmmc);
                        host->vqmmc_enabled = false;
                }

                break;
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

                /*
                 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
                 * and instead uses MCI_PWR_ON so apply whatever value is
                 * configured in the variant data.
                 */
                pwr |= variant->pwrreg_powerup;

                break;
        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
                        ret = regulator_enable(mmc->supply.vqmmc);
                        if (ret < 0)
                                dev_err(mmc_dev(mmc),
                                        "failed to enable vqmmc regulator\n");
                        else
                                host->vqmmc_enabled = true;
                }

                pwr |= MCI_PWR_ON;
                break;
        }

        if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
                /*
                 * The ST Micro variant has some additional bits
                 * indicating signal direction for the signals in
                 * the SD/MMC bus and feedback-clock usage.
                 */
                pwr |= host->pwr_reg_add;

                if (ios->bus_width == MMC_BUS_WIDTH_4)
                        pwr &= ~MCI_ST_DATA74DIREN;
                else if (ios->bus_width == MMC_BUS_WIDTH_1)
                        pwr &= (~MCI_ST_DATA74DIREN &
                                ~MCI_ST_DATA31DIREN &
                                ~MCI_ST_DATA2DIREN);
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                if (host->hw_designer != AMBA_VENDOR_ST)
                        pwr |= MCI_ROD;
                else {
                        /*
                         * The ST Micro variant uses the ROD bit for something
                         * else and only has OD (Open Drain).
                         */
                        pwr |= MCI_OD;
                }
        }

        /*
         * If clock = 0 and the variant requires the MMCIPOWER to be used for
         * gating the clock, the MCI_PWR_ON bit is cleared.
         */
        if (!ios->clock && variant->pwrreg_clkgate)
                pwr &= ~MCI_PWR_ON;

        spin_lock_irqsave(&host->lock, flags);

        mmci_set_clkreg(host, ios->clock);
        mmci_write_pwrreg(host, pwr);
        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);

        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status = mmc_gpio_get_cd(mmc);

        if (status == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        }
        return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
        int ret = 0;

        if (!IS_ERR(mmc->supply.vqmmc)) {

                pm_runtime_get_sync(mmc_dev(mmc));

                switch (ios->signal_voltage) {
                case MMC_SIGNAL_VOLTAGE_330:
                        ret = regulator_set_voltage(mmc->supply.vqmmc,
                                                    2700000, 3600000);
                        break;
                case MMC_SIGNAL_VOLTAGE_180:
                        ret = regulator_set_voltage(mmc->supply.vqmmc,
                                                    1700000, 1950000);
                        break;
                case MMC_SIGNAL_VOLTAGE_120:
                        ret = regulator_set_voltage(mmc->supply.vqmmc,
                                                    1100000, 1300000);
                        break;
                }

                if (ret)
                        dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

                pm_runtime_mark_last_busy(mmc_dev(mmc));
                pm_runtime_put_autosuspend(mmc_dev(mmc));
        }

        return ret;
}
static struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .pre_req        = mmci_pre_request,
        .post_req       = mmci_post_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmc_gpio_get_ro,
        .get_cd         = mmci_get_cd,
        .start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        int ret = mmc_of_parse(mmc);

        if (ret)
                return ret;

        if (of_get_property(np, "st,sig-dir-dat0", NULL))
                host->pwr_reg_add |= MCI_ST_DATA0DIREN;
        if (of_get_property(np, "st,sig-dir-dat2", NULL))
                host->pwr_reg_add |= MCI_ST_DATA2DIREN;
        if (of_get_property(np, "st,sig-dir-dat31", NULL))
                host->pwr_reg_add |= MCI_ST_DATA31DIREN;
        if (of_get_property(np, "st,sig-dir-dat74", NULL))
                host->pwr_reg_add |= MCI_ST_DATA74DIREN;
        if (of_get_property(np, "st,sig-dir-cmd", NULL))
                host->pwr_reg_add |= MCI_ST_CMDDIREN;
        if (of_get_property(np, "st,sig-pin-fbclk", NULL))
                host->pwr_reg_add |= MCI_ST_FBCLKEN;

        if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
                mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
        if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
                mmc->caps |= MMC_CAP_SD_HIGHSPEED;

        return 0;
}
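/*
 * Hypothetical device tree fragment (illustration only, not taken from a
 * real board file) showing the properties parsed above:
 *
 *      mmc@80126000 {
 *              compatible = "arm,pl18x", "arm,primecell";
 *              st,sig-dir-dat0;
 *              st,sig-dir-cmd;
 *              st,sig-pin-fbclk;
 *              mmc-cap-sd-highspeed;
 *              mmc-cap-mmc-highspeed;
 *      };
 */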
static int mmci_probe(struct amba_device *dev,
        const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct device_node *np = dev->dev.of_node;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* Must have platform data or Device Tree. */
        if (!plat && !np) {
                dev_err(&dev->dev, "No plat data or DT found\n");
                return -EINVAL;
        }

        if (!plat) {
                plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
                if (!plat)
                        return -ENOMEM;
        }

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc)
                return -ENOMEM;

        ret = mmci_of_parse(np, mmc);
        if (ret)
                goto host_free;

        host = mmc_priv(mmc);
        host->mmc = mmc;

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = devm_clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                goto host_free;
        }

        ret = clk_prepare_enable(host->clk);
        if (ret)
                goto host_free;

        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this,
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }

        host->phybase = dev->res.start;
        host->base = devm_ioremap_resource(&dev->dev, &dev->res);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto clk_disable;
        }

        /*
         * The ARM and ST versions of the block have slightly different
         * clock divider equations which means that the minimum divider
         * differs too.
         */
        if (variant->st_clkdiv)
                mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
        else
                mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
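        /*
         * Worked example (illustrative): with mclk = 100 MHz the minimum
         * frequency is DIV_ROUND_UP(100000000, 257) = 389106 Hz on the ST
         * variants and DIV_ROUND_UP(100000000, 512) = 195313 Hz on the ARM
         * ones.
         */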
        /*
         * If no maximum operating frequency is supplied, fall back to use
         * the module parameter, which has a (low) default value in case it
         * is not specified. Either value must not exceed the clock rate into
         * the block, of course.
         */
        if (mmc->f_max)
                mmc->f_max = min(host->mclk, mmc->f_max);
        else
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

        /* Get regulators and the supported OCR mask */
        mmc_regulator_get_supply(mmc);
        if (!mmc->ocr_avail)
                mmc->ocr_avail = plat->ocr_mask;
        else if (plat->ocr_mask)
                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

        /* DT takes precedence over platform data. */
        if (!np) {
                if (!plat->cd_invert)
                        mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
                mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
        }

        /* We support these capabilities. */
        mmc->caps |= MMC_CAP_CMD23;

        if (variant->busy_detect) {
                mmci_ops.card_busy = mmci_card_busy;
                mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
                mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
                mmc->max_busy_timeout = 0;
        }

        mmc->ops = &mmci_ops;

        /* We support these PM capabilities. */
        mmc->pm_caps |= MMC_PM_KEEP_POWER;

        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;

        /*
         * Set the maximum segment size.  Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 1 << 11;

        /*
         * Limit the number of blocks transferred so that we don't overflow
         * the maximum request size.
         */
        mmc->max_blk_count = mmc->max_req_size >> 11;

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        /* If DT, cd/wp gpios must be supplied through it. */
        if (!np && gpio_is_valid(plat->gpio_cd)) {
                ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
                if (ret)
                        goto clk_disable;
        }
        if (!np && gpio_is_valid(plat->gpio_wp)) {
                ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
                if (ret)
                        goto clk_disable;
        }

        ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
                        DRIVER_NAME " (cmd)", host);
        if (ret)
                goto clk_disable;

        if (!dev->irq[1])
                host->singleirq = true;
        else {
                ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
                                IRQF_SHARED, DRIVER_NAME " (pio)", host);
                if (ret)
                        goto clk_disable;
        }

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        pm_runtime_set_autosuspend_delay(&dev->dev, 50);
        pm_runtime_use_autosuspend(&dev->dev);
        pm_runtime_put(&dev->dev);

        mmc_add_host(mmc);

        return 0;

 clk_disable:
        clk_disable_unprepare(host->clk);
 host_free:
        mmc_free_host(mmc);
        return ret;
}
static int mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                /*
                 * Undo pm_runtime_put() in probe.  We use the _sync
                 * version here so that we can access the primecell.
                 */
                pm_runtime_get_sync(&dev->dev);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                clk_disable_unprepare(host->clk);
                mmc_free_host(mmc);
        }

        return 0;
}
static void mmci_save(struct mmci_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        writel(0, host->base + MMCIMASK0);
        if (host->variant->pwrreg_nopower) {
                writel(0, host->base + MMCIDATACTRL);
                writel(0, host->base + MMCIPOWER);
                writel(0, host->base + MMCICLOCK);
        }
        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->variant->pwrreg_nopower) {
                writel(host->clk_reg, host->base + MMCICLOCK);
                writel(host->datactrl_reg, host->base + MMCIDATACTRL);
                writel(host->pwr_reg, host->base + MMCIPOWER);
        }
        writel(MCI_IRQENABLE, host->base + MMCIMASK0);
        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                pinctrl_pm_select_sleep_state(dev);
                mmci_save(host);
                clk_disable_unprepare(host->clk);
        }

        return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
                mmci_restore(host);
                pinctrl_pm_select_default_state(dev);
        }

        return 0;
}
static const struct dev_pm_ops mmci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
        {
                .data   = &variant_arm,
        },
        {
                .data   = &variant_arm_extended_fifo,
        },
        {
                .data   = &variant_arm_extended_fifo_hwfc,
        },
        {
                .data   = &variant_arm,
        },
        /* ST Micro variants */
        {
                .data   = &variant_u300,
        },
        {
                .data   = &variant_nomadik,
        },
        {
                .data   = &variant_u300,
        },
        {
                .data   = &variant_ux500,
        },
        {
                .data   = &variant_ux500v2,
        },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
                .pm     = &mmci_dev_pm_ops,
        },
        .probe          = mmci_probe,
        .remove         = mmci_remove,
        .id_table       = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");