/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

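/*
 * Default maximum operating frequency (Hz), used when no f_max is supplied;
 * overridable via the 'fmax' module parameter registered with module_param()
 * at the bottom of this file.
 */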
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @clkreg_8bit_bus_enable: enable value for 8 bit bus
 * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @data_cmd_enable: enable value for data commands.
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
 * @blksz_datactrl16: true if block size is at b16..b30 position in datactrl
 *		      register
 * @blksz_datactrl4: true if block size is at b4..b16 position in datactrl
 *		     register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @f_max: maximum clk frequency supported by the controller.
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on dat0 is supported
 * @pwrreg_nopower: bits in MMCIPOWER don't control ext. power supply
 * @explicit_mclk_control: enable explicit mclk control in driver.
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		clkreg_8bit_bus_enable;
	unsigned int		clkreg_neg_edge_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	unsigned int		data_cmd_enable;
	unsigned int		datactrl_mask_ddrmode;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	bool			blksz_datactrl4;
	u32			pwrreg_powerup;
	u32			f_max;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
	bool			pwrreg_nopower;
	bool			explicit_mclk_control;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datactrl_mask_ddrmode	= MCI_ST_DPSM_DDRMODE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
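			/* e.g. mclk = 100 MHz, desired = 400 kHz => clkdiv = 248, cclk = 400 kHz */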
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
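			/* e.g. mclk = 100 MHz, desired = 400 kHz => clkdiv = 124, cclk = 400 kHz */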
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

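/*
 * On single-IRQ variants the MMCIMASK1 interrupt sources are folded into
 * MMCIMASK0; otherwise MMCIMASK1 drives the dedicated PIO interrupt line.
 */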
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in the remove and error paths only, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}
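	/* 100 polls x 10 us each gives the 1 ms budget noted above */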

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

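	/*
	 * Cookie 0 means "not prepared"; on signed overflow the pre-request
	 * cookie wraps back to 1 so it can never become 0 here.
	 */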
	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

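	/* Convert the ns timeout to card clock cycles for MMCIDATATIMER */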
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

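	/* blksz is a power of two (see mmci_validate_data), so ffs() - 1 == log2(blksz) */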
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (variant->sdio && host->mmc->card &&
	    mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		/*
		 * The ST Micro variants have a special bit
		 * to enable SDIO.
		 */
		datactrl |= MCI_ST_DPSM_SDIOEN;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

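/*
 * Busy detection (ST variants) is handled in three steps below: keep
 * waiting while DAT0 signals busy, unmask the busy-end IRQ once the
 * command itself has completed, then finish the request when busy ends.
 */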
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc = (cmd == host->mrq->sbc);
	bool busy_resp = host->variant->busy_detect &&
			 (cmd->flags & MMC_RSP_BUSY);

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
	    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
	    (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
		       base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
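		/*
		 * MMCIFIFOCNT holds the number of words still to be
		 * transferred, so host_remain minus that (in bytes) is
		 * what is currently available in the FIFO.
		 */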
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		cmd = host->cmd;
		if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
				 MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

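	/* If SET_BLOCK_COUNT (CMD23) is in use, send it ahead of the main command */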
	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * On Qualcomm-like controllers, get the nearest minimum clock
	 * to 100 kHz.
	 */
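	/* ST: min cclk = mclk / (255 + 2); ARM: min cclk = mclk / (2 * (255 + 1)) */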
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	/* If DT, cd/wp gpios must be supplied through it. */
	if (!np && gpio_is_valid(plat->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
		if (ret)
			goto clk_disable;
	}
	if (!np && gpio_is_valid(plat->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
		if (ret)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");