/*
 * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...) \
        pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

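/*
 * Default ceiling for the card clock, in Hz.  mmc->f_max is set to
 * min(mclk, fmax) in mmci_probe(); the value can be overridden at module
 * load time via the "fmax" parameter declared at the end of this file.
 */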
static unsigned int fmax = 515633;

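/*
 * Complete an MMC request and hand it back to the core.  Called with the
 * host lock held; the lock is dropped around mmc_request_done() since the
 * core may immediately submit a new request.
 */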
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        if (mrq->data)
                mrq->data->bytes_xfered = host->data_xfered;

        /*
         * Need to drop the host lock here; mmc_request_done may call
         * back into the driver...
         */
        spin_unlock(&host->lock);
        mmc_request_done(host->mmc, mrq);
        spin_lock(&host->lock);
}

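/*
 * Shut down the data path: disable the DPSM, mask the PIO FIFO interrupts
 * and forget the current data transfer.
 */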
static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        writel(0, host->base + MMCIMASK1);
        host->data = NULL;
}

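/*
 * Prepare a PIO data transfer: convert the card's nanosecond timeout into
 * card clock cycles, program the data timer, length and control registers,
 * and unmask the appropriate FIFO interrupt in MMCIMASK1.
 */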
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        DBG(host, "blksz %04x blks %04x flags %08x\n",
            data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz;
        host->data_xfered = 0;

        mmci_init_sg(host, data);

        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);

        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
        if (data->flags & MMC_DATA_READ) {
                datactrl |= MCI_DPSM_DIRECTION;
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than a FIFOSIZE of bytes to transfer,
                 * trigger a PIO interrupt as soon as any data is available.
                 */
                if (host->size < MCI_FIFOSIZE)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        writel(irqmask, base + MMCIMASK1);
}

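/*
 * Issue a command on the CPSM.  If a previous command is still enabled,
 * the CPSM is disabled and given a moment to settle before the new
 * argument and command are written.
 */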
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        DBG(host, "op %02x arg %08x flags %08x\n",
            cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}

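/*
 * Handle the data-path status bits: account for each completed block,
 * latch CRC/timeout/FIFO errors, and on data end either send the stop
 * command or complete the request.
 */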
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        if (status & MCI_DATABLOCKEND) {
                host->data_xfered += data->blksz;
        }
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                if (status & MCI_DATACRCFAIL)
                        data->error = MMC_ERR_BADCRC;
                else if (status & MCI_DATATIMEOUT)
                        data->error = MMC_ERR_TIMEOUT;
                else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
                        data->error = MMC_ERR_FIFO;
                status |= MCI_DATAEND;

                /*
                 * We hit an error condition.  Ensure that any data
                 * partially written to a page is properly coherent.
                 */
                if (host->sg_len && data->flags & MMC_DATA_READ)
                        flush_dcache_page(host->sg_ptr->page);
        }
        if (status & MCI_DATAEND) {
                mmci_stop_data(host);

                if (!data->stop) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}

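/*
 * Handle the command-path status bits: read back the response words,
 * record timeout/CRC errors, and either finish the request or start the
 * data phase for a write (reads are started before the command, in
 * mmci_request()).
 */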
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;

        cmd->resp[0] = readl(base + MMCIRESPONSE0);
        cmd->resp[1] = readl(base + MMCIRESPONSE1);
        cmd->resp[2] = readl(base + MMCIRESPONSE2);
        cmd->resp[3] = readl(base + MMCIRESPONSE3);

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = MMC_ERR_TIMEOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = MMC_ERR_BADCRC;
        }

        if (!cmd->data || cmd->error != MMC_ERR_NONE) {
                if (host->data)
                        mmci_stop_data(host);
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

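/*
 * Drain the receive FIFO into the current buffer.  MMCIFIFOCNT counts the
 * words still to be transferred, so host->size minus that count gives the
 * bytes already available to read.  Returns the number of bytes copied.
 */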
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;

        do {
                int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                readsl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}

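/*
 * Fill the transmit FIFO from the current buffer: a whole FIFO's worth if
 * it is completely empty, otherwise only half.  Returns the number of
 * bytes written.
 */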
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
                count = min(remain, maxcnt);

                writesl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        void __iomem *base = host->base;
        u32 status;

        status = readl(base + MMCISTATUS);

        DBG(host, "irq1 %08x\n", status);

        do {
                unsigned long flags;
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                /*
                 * Map the current scatter buffer.
                 */
                buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
                remain = host->sg_ptr->length - host->sg_off;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                /*
                 * Unmap the buffer.
                 */
                mmci_kunmap_atomic(host, buffer, &flags);

                host->sg_off += len;
                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                /*
                 * If we were reading, and we have completed this
                 * page, ensure that the data cache is coherent.
                 */
                if (status & MCI_RXACTIVE)
                        flush_dcache_page(host->sg_ptr->page);

                if (!mmci_next_sg(host))
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        /*
         * If we're nearing the end of the read, switch to
         * "any data available" mode.
         */
        if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
                writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                writel(0, base + MMCIMASK1);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);
                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                DBG(host, "irq0 %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
                              MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);

        WARN_ON(host->mrq != NULL);

        spin_lock_irq(&host->lock);

        host->mrq = mrq;

        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irq(&host->lock);
}

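/*
 * Apply the core's clock, power and bus-mode settings.  The card clock is
 * derived from mclk as mclk / (2 * (divider + 1)), or bypassed entirely
 * when the requested rate is at least mclk; the power register value is
 * built from the platform's translate_vdd() callback plus the requested
 * power state.
 */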
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        u32 clk = 0, pwr = 0;

        if (ios->clock) {
                if (ios->clock >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        host->cclk = host->mclk;
                } else {
                        clk = host->mclk / (2 * ios->clock) - 1;
                        if (clk > 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }
                clk |= MCI_CLK_ENABLE;
        }

        if (host->plat->translate_vdd)
                pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                break;
        case MMC_POWER_UP:
                pwr |= MCI_PWR_UP;
                break;
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                pwr |= MCI_ROD;

        writel(clk, host->base + MMCICLOCK);

        if (host->pwr != pwr) {
                host->pwr = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}

static const struct mmc_host_ops mmci_ops = {
        .request = mmci_request,
        .set_ios = mmci_set_ios,
};

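/*
 * Card-detect polling.  The platform's status() callback is sampled once a
 * second from a timer, and any change is reported to the MMC core via
 * mmc_detect_change().
 */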
static void mmci_check_status(unsigned long data)
{
        struct mmci_host *host = (struct mmci_host *)data;
        unsigned int status;

        status = host->plat->status(mmc_dev(host->mmc));
        if (status ^ host->oldstat)
                mmc_detect_change(host->mmc, 0);

        host->oldstat = status;
        mod_timer(&host->timer, jiffies + HZ);
}

static int mmci_probe(struct amba_device *dev, void *id)
{
        struct mmc_platform_data *plat = dev->dev.platform_data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* must have platform data */
        if (!plat) {
                ret = -EINVAL;
                goto out;
        }

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->clk = clk_get(&dev->dev, "MCLK");
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_enable(host->clk);
        if (ret)
                goto clk_free;

        host->plat = plat;
        host->mclk = clk_get_rate(host->clk);
        host->mmc = mmc;
        host->base = ioremap(dev->res.start, SZ_4K);
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }

        mmc->ops = &mmci_ops;
        mmc->f_min = (host->mclk + 511) / 512;
        mmc->f_max = min(host->mclk, fmax);
        mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = MMC_CAP_MULTIWRITE;

        /*
         * We can do SGIO
         */
        mmc->max_hw_segs = 16;
        mmc->max_phys_segs = NR_SG;

        /*
         * Since we only have a 16-bit data length register, we must
         * ensure that we don't exceed 2^16-1 bytes in a single request.
         */
        mmc->max_req_size = 65535;

        /*
         * Set the maximum segment size.  Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 2048;

        /*
         * No limit on the number of blocks transferred.
         */
        mmc->max_blk_count = mmc->max_req_size;

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
        if (ret)
                goto irq0_free;

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        mmc_add_host(mmc);

        printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
               mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
               (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

        init_timer(&host->timer);
        host->timer.data = (unsigned long)host;
        host->timer.function = mmci_check_status;
        host->timer.expires = jiffies + HZ;
        add_timer(&host->timer);

        return 0;

 irq0_free:
        free_irq(dev->irq[0], host);
 unmap:
        iounmap(host->base);
 clk_disable:
        clk_disable(host->clk);
 clk_free:
        clk_put(host->clk);
 host_free:
        mmc_free_host(mmc);
 rel_regions:
        amba_release_regions(dev);
 out:
        return ret;
}

static int mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                del_timer_sync(&host->timer);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                free_irq(dev->irq[0], host);
                free_irq(dev->irq[1], host);

                iounmap(host->base);
                clk_disable(host->clk);
                clk_put(host->clk);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}

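/*
 * Power management: on suspend the MMC core is suspended first and all
 * controller interrupts are then masked; on resume the interrupt mask is
 * restored before the core is resumed.
 */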
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc, state);
                if (ret == 0)
                        writel(0, host->base + MMCIMASK0);
        }

        return ret;
}

static int mmci_resume(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#else
#define mmci_suspend NULL
#define mmci_resume NULL
#endif

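/*
 * AMBA peripheral IDs matched by this driver: part numbers 0x180 and 0x181
 * correspond to the ARM PrimeCell PL180 and PL181 MMCI variants.
 */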
static struct amba_id mmci_ids[] = {
        {
                .id = 0x00041180,
                .mask = 0x000fffff,
        },
        {
                .id = 0x00041181,
                .mask = 0x000fffff,
        },
        { 0, 0 },
};

static struct amba_driver mmci_driver = {
        .drv = {
                .name = DRIVER_NAME,
        },
        .probe = mmci_probe,
        .remove = mmci_remove,
        .suspend = mmci_suspend,
        .resume = mmci_resume,
        .id_table = mmci_ids,
};

static int __init mmci_init(void)
{
        return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
        amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");