/*
 *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>

#include <asm/sizes.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/mmc.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG	1
#define CLKRT_OFF	(~0)

#define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
				 || cpu_is_pxa935())

struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	struct clk		*clk;
	unsigned long		clkrate;
	int			irq;
	int			dma;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;
	struct pxa_dma_desc	*sg_cpu;
	unsigned int		dma_len;

	unsigned int		dma_dir;
	unsigned int		dma_drcmrrx;
	unsigned int		dma_drcmrtx;

	struct regulator	*vcc;
};

static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_mask)
			dev_warn(mmc_dev(host->mmc),
				 "ocr_mask/setpower will not be used\n");
	}
#endif
	if (host->vcc == NULL) {
		/* fall-back to platform data */
		host->mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}

static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
{
	int on;

#ifdef CONFIG_REGULATOR
	if (host->vcc)
		mmc_regulator_set_ocr(host->vcc, vdd);
#endif
	if (!host->vcc && host->pdata &&
	    gpio_is_valid(host->pdata->gpio_power)) {
		on = ((1 << vdd) & host->pdata->ocr_mask);
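		/* gpio_power_invert flips the sense of the power GPIO (e.g. for an active-low switch). */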
		gpio_set_value(host->pdata->gpio_power,
			       !!on ^ host->pdata->gpio_power_invert);
	}
	if (!host->vcc && host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}

static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

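		/* Poll for up to ~10ms for the controller to report the clock stopped. */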
		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
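	/*
	 * The divide-by-256 below suggests MMC_RDTO counts the read
	 * time-out in units of 256 clock cycles, so round the computed
	 * clock count up to the next multiple of 256.
	 */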
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
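		/*
		 * Writes whose length is not a multiple of 32 bytes need an
		 * end-of-descriptor interrupt so the partially filled
		 * controller buffer can be flushed (see BUF_PART_FULL in
		 * pxamci_dma_irq()).
		 */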
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}

static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
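	/* The 32-bit command argument is split across two 16-bit registers. */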
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick. We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
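	/*
	 * Each 32-bit response word is stitched together from overlapping
	 * 16-bit reads of MMC_RES: the low byte carried over from the
	 * previous read, one full 16-bit word, and the top byte of the
	 * following read.
	 */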
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			DCSR(host->dma) = DCSR_RUN;
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here. There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
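	/*
	 * If the request carries a stop command (multi-block transfer),
	 * issue it now; otherwise the request is complete.
	 */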
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}

static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

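	/*
	 * CMDAT_INIT (set by set_ios() at power-up) is a one-shot flag:
	 * let it accompany this command only, then clear it so later
	 * commands do not repeat the card initialisation sequence.
	 */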
	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}

static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
		if (host->pdata->gpio_card_ro_invert)
			return !gpio_get_value(host->pdata->gpio_card_ro);
		else
			return gpio_get_value(host->pdata->gpio_card_ro);
	}
	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		if (host->clkrt == CLKRT_OFF)
			clk_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire. check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
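			/*
			 * MMC_CLKRT takes the base-2 log of the divider
			 * (MMCLK = clkrate / 2^CLKRT), hence fls().
			 */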
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		pxamci_set_power(host, ios->vdd);

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}

static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
	struct pxamci_host *pxa_host = mmc_priv(host);

	if (enable)
		pxamci_enable_irq(pxa_host, SDIO_INT);
	else
		pxamci_disable_irq(pxa_host, SDIO_INT);
}

static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};

static void pxamci_dma_irq(int dma, void *devid)
{
	struct pxamci_host *host = devid;
	int dcsr = DCSR(dma);
	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;

	if (dcsr & DCSR_ENDINTR) {
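		/*
		 * End-of-descriptor interrupt on a write: flush the
		 * partially filled transmit buffer out to the card.
		 */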
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
		       mmc_hostname(host->mmc), dma, dcsr);
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}
}

static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, host->pdata->detect_delay);
	return IRQ_HANDLED;
}

static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r, *dmarx, *dmatx;
	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;

	pxamci_init_ocr(host);

	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (mmc_has_26MHz())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

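	/*
	 * One page of coherent memory holds the pxa_dma_desc chain that the
	 * DMA controller walks; host->sg_dma is its bus address.
	 */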
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmarx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrrx = dmarx->start;

	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmatx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrtx = dmatx->start;

	if (host->pdata) {
		gpio_cd = host->pdata->gpio_card_detect;
		gpio_ro = host->pdata->gpio_card_ro;
		gpio_power = host->pdata->gpio_power;
	}
	if (gpio_is_valid(gpio_power)) {
		ret = gpio_request(gpio_power, "mmc card power");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
			goto out;
		}
		gpio_direction_output(gpio_power,
				      host->pdata->gpio_power_invert);
	}
	if (gpio_is_valid(gpio_ro)) {
		ret = gpio_request(gpio_ro, "mmc card read only");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
			goto err_gpio_ro;
		}
		gpio_direction_input(gpio_ro);
	}
	if (gpio_is_valid(gpio_cd)) {
		ret = gpio_request(gpio_cd, "mmc card detect");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
			goto err_gpio_cd;
		}
		gpio_direction_input(gpio_cd);

		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				  "mmc card detect", mmc);
		if (ret) {
			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
			goto err_request_irq;
		}
	}

	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	if (gpio_is_valid(gpio_power) && host->pdata->setpower)
		dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
	if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
		dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");

	mmc_add_host(mmc);

	return 0;

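/*
 * Error unwinding: each label below releases what was claimed before the
 * failure point and falls through to the common cleanup at "out".
 */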
err_request_irq:
	gpio_free(gpio_cd);
err_gpio_cd:
	gpio_free(gpio_ro);
err_gpio_ro:
	gpio_free(gpio_power);
out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
		if (host->clk)
			clk_put(host->clk);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}

static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		if (host->pdata) {
			gpio_cd = host->pdata->gpio_card_detect;
			gpio_ro = host->pdata->gpio_card_ro;
			gpio_power = host->pdata->gpio_power;
		}
		if (gpio_is_valid(gpio_cd)) {
			free_irq(gpio_to_irq(gpio_cd), mmc);
			gpio_free(gpio_cd);
		}
		if (gpio_is_valid(gpio_ro))
			gpio_free(gpio_ro);
		if (gpio_is_valid(gpio_power))
			gpio_free(gpio_power);
		if (host->vcc)
			regulator_put(host->vcc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}

#ifdef CONFIG_PM
static int pxamci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, PMSG_SUSPEND);

	return ret;
}

static int pxamci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}

static struct dev_pm_ops pxamci_pm_ops = {
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
};
#endif

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxamci_pm_ops,
#endif
	},
};

static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");