/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"
#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @clkreg_8bit_bus_enable: enable value for 8 bit bus
 * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @data_cmd_enable: enable value for data commands.
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl
 *		      register
 * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
 *		     register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @f_max: maximum clk frequency supported by the controller.
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on dat0 is supported
 * @pwrreg_nopower: bits in MMCIPOWER don't control ext. power supply
 * @explicit_mclk_control: enable explicit mclk control in driver.
 * @qcom_fifo: enables qcom specific fifo pio read logic.
 * @reversed_irq_handling: handle data irq before cmd irq.
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		clkreg_8bit_bus_enable;
	unsigned int		clkreg_neg_edge_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	unsigned int		data_cmd_enable;
	unsigned int		datactrl_mask_ddrmode;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	bool			blksz_datactrl4;
	u32			pwrreg_powerup;
	u32			f_max;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
	bool			pwrreg_nopower;
	bool			explicit_mclk_control;
	bool			qcom_fifo;
	bool			reversed_irq_handling;
};
static struct variant_data variant_arm = {
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.reversed_irq_handling	= true,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_nomadik = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datactrl_mask_ddrmode	= MCI_ST_DPSM_DDRMODE,
	.datalength_bits	= 24,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_qcom = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.data_cmd_enable	= MCI_QCOM_CSPM_DATCMD,
	.blksz_datactrl4	= true,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_UP,
	.explicit_mclk_control	= true,
};
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}
/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}
static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}
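/*
 * Illustrative arithmetic for the figures in mmci_reg_delay() above (only the
 * worst cases quoted in that comment): three cycles at 100 kHz take
 * 3 / 100000 s = 30 us, and three cycles at 25 MHz take 3 / 25000000 s = 120 ns.
 */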
/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	if (variant->explicit_mclk_control) {
		host->cclk = host->mclk;
	} else if (desired >= host->mclk) {
		clk = MCI_CLK_BYPASS;
		if (variant->st_clkdiv)
			clk |= MCI_ST_UX500_NEG_EDGE;
		host->cclk = host->mclk;
	} else if (variant->st_clkdiv) {
		/*
		 * DB8500 TRM says f = mclk / (clkdiv + 2)
		 * => clkdiv = (mclk / f) - 2
		 * Round the divider up so we don't exceed the max frequency
		 */
		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
		host->cclk = host->mclk / (clk + 2);
	} else {
		/*
		 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
		 * => clkdiv = mclk / (2 * f) - 1
		 */
		clk = host->mclk / (2 * desired) - 1;
		host->cclk = host->mclk / (2 * (clk + 1));
	}

	clk |= variant->clkreg_enable;
	clk |= MCI_CLK_ENABLE;
	/* This hasn't proven to be worthwhile */
	/* clk |= MCI_CLK_PWRSAVE; */

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}
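/*
 * Illustrative worked example for the two divider equations in
 * mmci_set_clkreg() above (numbers assumed, not from any particular board):
 * with mclk = 100 MHz and desired = 400 kHz, the ST divider gives
 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
 * cclk = 100000000 / (248 + 2) = 400 kHz, while the PL180 divider gives
 * clkdiv = 100000000 / (2 * 400000) - 1 = 124 and
 * cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */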
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
/*
 * This is used in only a few places, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);

	return 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;

		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;
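	/*
	 * Illustrative example of the timeout conversion above (values
	 * assumed): with data->timeout_ns = 100000000 (100 ms) and
	 * cclk = 25 MHz, clks = 100000000 * 25000000 / NSEC_PER_SEC =
	 * 2500000 card clock cycles, to which data->timeout_clks is added.
	 */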
	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
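	/*
	 * Illustrative example of the block size encoding above (blksz
	 * assumed): for data->blksz = 512, blksz_bits = ffs(512) - 1 = 9,
	 * so classic variants program the power-of-two exponent (9 << 4),
	 * while blksz_datactrl16/blksz_datactrl4 variants program the raw
	 * block size (512 << 16 or 512 << 4).
	 */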
	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			unsigned int clk;
			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);

	if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
		MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
	    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
	    (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}
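/*
 * Illustrative note on the generic helper above (values assumed): the
 * MMCIFIFOCNT register counts remaining words, so with remain = 64 bytes
 * and a FIFOCNT reading of 14 words, 64 - (14 << 2) = 8 bytes are currently
 * available for reading from the FIFO.
 */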
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses
	 * from the fifo range should be used
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
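		/*
		 * Illustrative example of the rounding above (count value
		 * assumed): for a 7 byte SDIO transfer, (7 + 3) >> 2 = 2,
		 * so two full 32-bit words are pushed to cover the 7
		 * payload bytes.
		 */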
		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}
static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;
->qcom_fifo
)
1540 host
->get_rx_fifocnt
= mmci_qcom_get_rx_fifocnt
;
1542 host
->get_rx_fifocnt
= mmci_get_rx_fifocnt
;
1545 host
->variant
= variant
;
1546 host
->mclk
= clk_get_rate(host
->clk
);
1548 * According to the spec, mclk is max 100 MHz,
1549 * so we try to adjust the clock down to this,
1552 if (host
->mclk
> variant
->f_max
) {
1553 ret
= clk_set_rate(host
->clk
, variant
->f_max
);
1556 host
->mclk
= clk_get_rate(host
->clk
);
1557 dev_dbg(mmc_dev(mmc
), "eventual mclk rate: %u Hz\n",
1561 host
->phybase
= dev
->res
.start
;
1562 host
->base
= devm_ioremap_resource(&dev
->dev
, &dev
->res
);
1563 if (IS_ERR(host
->base
)) {
1564 ret
= PTR_ERR(host
->base
);
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too; on Qualcomm-like controllers, get the nearest
	 * minimum clock to 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
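	/*
	 * Illustrative example (mclk value assumed): with host->mclk at
	 * 100 MHz, the ST divider yields f_min = DIV_ROUND_UP(100000000, 257)
	 * = 389106 Hz, while the ARM divider yields
	 * f_min = DIV_ROUND_UP(100000000, 512) = 195313 Hz.
	 */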
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
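	/*
	 * Illustrative example (datalength_bits assumed to be 24): then
	 * max_req_size = (1 << 24) - 1 = 16777215 bytes and
	 * max_blk_count = 16777215 >> 11 = 8191 blocks of up to 2048 bytes.
	 */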
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	/* If DT, cd/wp gpios must be supplied through it. */
	if (!np && gpio_is_valid(plat->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
		if (ret)
			goto clk_disable;
	}
	if (!np && gpio_is_valid(plat->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
		if (ret)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}
static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static struct amba_id mmci_ids[] = {
	{
		.data	= &variant_arm,
	},
	{
		.data	= &variant_arm_extended_fifo,
	},
	{
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_nomadik,
	},
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_ux500,
	},
	{
		.data	= &variant_ux500v2,
	},
	/* Qualcomm variants */
	{
		.data	= &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");