ARM: 6785/1: mmci: separate out ST Micro register defines
[deliverable/linux.git] / drivers / mmc / host / mmci.c
CommitLineData
1da177e4 1/*
70f10482 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
1da177e4
LT
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
c8ebae37 5 * Copyright (C) 2010 ST-Ericsson SA
1da177e4
LT
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
1da177e4
LT
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
613b152c 17#include <linux/kernel.h>
1da177e4
LT
18#include <linux/delay.h>
19#include <linux/err.h>
20#include <linux/highmem.h>
019a5f56 21#include <linux/log2.h>
1da177e4 22#include <linux/mmc/host.h>
34177802 23#include <linux/mmc/card.h>
a62c80e5 24#include <linux/amba/bus.h>
f8ce2547 25#include <linux/clk.h>
bd6dee6f 26#include <linux/scatterlist.h>
89001446 27#include <linux/gpio.h>
34e84f39 28#include <linux/regulator/consumer.h>
c8ebae37
RK
29#include <linux/dmaengine.h>
30#include <linux/dma-mapping.h>
31#include <linux/amba/mmci.h>
1da177e4 32
7b09cdac 33#include <asm/div64.h>
1da177e4 34#include <asm/io.h>
c6b8fdad 35#include <asm/sizes.h>
1da177e4
LT
36
37#include "mmci.h"
38
#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum card clock frequency in Hz.  Used as the f_max
 * fallback when the platform data does not supply one; in either case
 * the value is clamped to the block's input clock (mclk) at probe time.
 */
static unsigned int fmax = 515633;
42
4956e109
RV
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
};
64
/* Plain ARM PL180/181: 16-word FIFO, 16-bit data length register */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

/* ARM variant fitted with a deeper 128-word FIFO */
static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

/* ST-Ericsson U300: supports SDIO, H/W flow control via MCI_ST_U300_HWFCEN */
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

/*
 * ST-Ericsson Ux500 (e.g. DB8500): 30-word FIFO, 24-bit data length
 * register, clock enabled by default, and the ST-specific clock divider
 * algorithm (f = mclk / (clkdiv + 2), see mmci_set_clkreg()).
 */
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};
b70a67f9 94
a6a6464a
LW
95/*
96 * This must be called with host->lock held
97 */
98static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
99{
4956e109
RV
100 struct variant_data *variant = host->variant;
101 u32 clk = variant->clkreg;
a6a6464a
LW
102
103 if (desired) {
104 if (desired >= host->mclk) {
991a86e1 105 clk = MCI_CLK_BYPASS;
a6a6464a 106 host->cclk = host->mclk;
b70a67f9
LW
107 } else if (variant->st_clkdiv) {
108 /*
109 * DB8500 TRM says f = mclk / (clkdiv + 2)
110 * => clkdiv = (mclk / f) - 2
111 * Round the divider up so we don't exceed the max
112 * frequency
113 */
114 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
115 if (clk >= 256)
116 clk = 255;
117 host->cclk = host->mclk / (clk + 2);
a6a6464a 118 } else {
b70a67f9
LW
119 /*
120 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
121 * => clkdiv = mclk / (2 * f) - 1
122 */
a6a6464a
LW
123 clk = host->mclk / (2 * desired) - 1;
124 if (clk >= 256)
125 clk = 255;
126 host->cclk = host->mclk / (2 * (clk + 1));
127 }
4380c14f
RV
128
129 clk |= variant->clkreg_enable;
a6a6464a
LW
130 clk |= MCI_CLK_ENABLE;
131 /* This hasn't proven to be worthwhile */
132 /* clk |= MCI_CLK_PWRSAVE; */
133 }
134
9e6c82cd 135 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
771dc157
LW
136 clk |= MCI_4BIT_BUS;
137 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
138 clk |= MCI_ST_8BIT_BUS;
9e6c82cd 139
a6a6464a
LW
140 writel(clk, host->base + MMCICLOCK);
141}
142
1da177e4
LT
143static void
144mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
145{
146 writel(0, host->base + MMCICOMMAND);
147
e47c222b
RK
148 BUG_ON(host->data);
149
1da177e4
LT
150 host->mrq = NULL;
151 host->cmd = NULL;
152
1da177e4
LT
153 /*
154 * Need to drop the host lock here; mmc_request_done may call
155 * back into the driver...
156 */
157 spin_unlock(&host->lock);
158 mmc_request_done(host->mmc, mrq);
159 spin_lock(&host->lock);
160}
161
2686b4b4
LW
162static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
163{
164 void __iomem *base = host->base;
165
166 if (host->singleirq) {
167 unsigned int mask0 = readl(base + MMCIMASK0);
168
169 mask0 &= ~MCI_IRQ1MASK;
170 mask0 |= mask;
171
172 writel(mask0, base + MMCIMASK0);
173 }
174
175 writel(mask, base + MMCIMASK1);
176}
177
1da177e4
LT
/*
 * Tear down the current data transfer: disable the data path state
 * machine, mask all PIO interrupts and forget the data request.
 * Called with host->lock held (from the IRQ handlers).
 */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
184
4ce1d6cb
RV
185static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
186{
187 unsigned int flags = SG_MITER_ATOMIC;
188
189 if (data->flags & MMC_DATA_READ)
190 flags |= SG_MITER_TO_SG;
191 else
192 flags |= SG_MITER_FROM_SG;
193
194 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
195}
196
c8ebae37
RK
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/*
 * Acquire DMA channels at probe time using the filter function and
 * parameters from the platform data.  Failure is not fatal: directions
 * without a channel simply fall back to PIO.
 */
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
						   plat->dma_filter,
						   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
						   plat->dma_filter,
						   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		/* Share the RX channel for TX; see mmci_dma_release() */
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
274
275/*
276 * This is used in __devinit or __devexit so inline it
277 * so it can be discarded.
278 */
279static inline void mmci_dma_release(struct mmci_host *host)
280{
281 struct mmci_platform_data *plat = host->plat;
282
283 if (host->dma_rx_channel)
284 dma_release_channel(host->dma_rx_channel);
285 if (host->dma_tx_channel && plat->dma_tx_param)
286 dma_release_channel(host->dma_tx_channel);
287 host->dma_rx_channel = host->dma_tx_channel = NULL;
288}
289
/*
 * Finish a DMA transfer: poll briefly for the DMA controller to drain
 * the FIFO, flag an error if data is left behind, and unmap the SG
 * list.  If the FIFO could not be drained, the DMA setup is considered
 * unusable and released, reverting the driver to PIO-only operation.
 */
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}
334
/*
 * Called from the data IRQ path when an error is signalled while a DMA
 * transfer is in progress: log it and abort the in-flight descriptor
 * on the current channel.
 */
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}
340
/*
 * Try to start the current data request (host->data) as a DMA transfer,
 * ORing the extra @datactrl bits into the DATACTRL register.
 *
 * Returns 0 on success.  A negative error tells the caller
 * (mmci_start_data()) to fall back to PIO: no channel for this
 * direction, transfer not larger than the FIFO, SG mapping failure,
 * or descriptor preparation failure.  Called with host->lock held.
 */
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	struct variant_data *variant = host->variant;
	/* FIFO is accessed as 32-bit words; burst at the half-FIFO mark */
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct mmc_data *data = host->data;
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	host->dma_current = NULL;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (host->size <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	/* Okay, go for it. */
	host->dma_current = chan;

	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;

unmap_exit:
	dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

/* Always fails, so mmci_start_data() always falls back to PIO */
static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}
#endif
438
1da177e4
LT
/*
 * Program and start the data path for @data.  DMA is attempted first
 * via mmci_dma_start_data(); on failure the transfer proceeds by PIO
 * using the SG iterator and FIFO interrupts.  Called with host->lock
 * held.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	/* Convert the nanosecond timeout to card clock cycles */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* Block size is programmed as log2; it must be a power of two */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants has a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	/* DATAEND is only wanted in DMA mode; mask it for PIO */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
508
/*
 * Program the command path state machine and issue @cmd.  @c carries
 * extra CPSM flags from the caller (0 for a plain command).  Called
 * with host->lock held.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	/*
	 * The CPSM must be disabled before being reprogrammed; give the
	 * hardware a moment to notice the disable.
	 */
	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* Command-path interrupt mode is deliberately never used */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
536
/*
 * Handle data-path interrupt status for the current transfer: account
 * errors (CRC/timeout/underrun/overrun) into data->error and
 * bytes_xfered, and on DATAEND or error tear down the transfer and
 * either complete the request or issue its stop command.  Called with
 * host->lock held from mmci_irq().
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			/* Discount the FIFO-worth of data that may be lost */
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		/* Only whole blocks count as transferred */
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
598
/*
 * Handle command-path interrupt status: record timeout/CRC errors or
 * latch the four response registers, then either finish the request
 * or — for a command with write data attached — start the data phase.
 * (Read data is started before the command; see mmci_request().)
 * Called with host->lock held from mmci_irq().
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		/* Abort any data phase that was already set up */
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
626
/*
 * Drain the receive FIFO into @buffer by PIO; returns the number of
 * bytes copied.
 *
 * NOTE(review): the count computation treats MMCIFIFOCNT as the number
 * of words still expected from the card, so host->size minus that is
 * what already sits in the FIFO — confirm against the PL180 TRM.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/* Bytes currently available in the FIFO */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/* FIFO is word-wide; whole words only */
		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
657
658static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
659{
8301bb68 660 struct variant_data *variant = host->variant;
1da177e4
LT
661 void __iomem *base = host->base;
662 char *ptr = buffer;
663
664 do {
665 unsigned int count, maxcnt;
666
8301bb68
RV
667 maxcnt = status & MCI_TXFIFOEMPTY ?
668 variant->fifosize : variant->fifohalfsize;
1da177e4
LT
669 count = min(remain, maxcnt);
670
34177802
LW
671 /*
672 * The ST Micro variant for SDIO transfer sizes
673 * less then 8 bytes should have clock H/W flow
674 * control disabled.
675 */
676 if (variant->sdio &&
677 mmc_card_sdio(host->mmc->card)) {
678 if (count < 8)
679 writel(readl(host->base + MMCICLOCK) &
680 ~variant->clkreg_enable,
681 host->base + MMCICLOCK);
682 else
683 writel(readl(host->base + MMCICLOCK) |
684 variant->clkreg_enable,
685 host->base + MMCICLOCK);
686 }
687
688 /*
689 * SDIO especially may want to send something that is
690 * not divisible by 4 (as opposed to card sectors
691 * etc), and the FIFO only accept full 32-bit writes.
692 * So compensate by adding +3 on the count, a single
693 * byte become a 32bit write, 7 bytes will be two
694 * 32bit writes etc.
695 */
696 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
1da177e4
LT
697
698 ptr += count;
699 remain -= count;
700
701 if (remain == 0)
702 break;
703
704 status = readl(base + MMCISTATUS);
705 } while (status & MCI_TXFIFOHALFEMPTY);
706
707 return ptr - buffer;
708}
709
/*
 * PIO data transfer IRQ handler.
 *
 * Moves data between the FIFO and the request's SG list using the
 * sg_miter iterator; local interrupts are disabled while the atomic
 * iterator mapping is live (see SG_MITER_ATOMIC in mmci_init_sg()).
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Partial SG entry consumed: wait for the next IRQ */
		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
789
/*
 * Handle completion of command and data transfers.
 *
 * Loops until no enabled status bits remain, dispatching data events to
 * mmci_data_irq() and command events to mmci_cmd_irq().  On single-IRQ
 * hosts the PIO (IRQ1) sources arrive on this line too and are serviced
 * by calling mmci_pio_irq() directly.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			/* PIO sources were handled above; drop them here */
			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
835
836static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
837{
838 struct mmci_host *host = mmc_priv(mmc);
9e943021 839 unsigned long flags;
1da177e4
LT
840
841 WARN_ON(host->mrq != NULL);
842
019a5f56 843 if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
64de0289
LW
844 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
845 mrq->data->blksz);
255d01af
PO
846 mrq->cmd->error = -EINVAL;
847 mmc_request_done(mmc, mrq);
848 return;
849 }
850
9e943021 851 spin_lock_irqsave(&host->lock, flags);
1da177e4
LT
852
853 host->mrq = mrq;
854
855 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
856 mmci_start_data(host, mrq->data);
857
858 mmci_start_command(host, mrq->cmd, 0);
859
9e943021 860 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
861}
862
/*
 * Host ops .set_ios: apply the requested power mode, bus mode, bus
 * width and clock.  Power comes either from a vmmc regulator
 * (host->vcc) or from the platform's vdd_handler.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
		/* fall through - ST variants go straight to PWR_ON */
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	/* Only touch the power register when the value actually changes */
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
925
89001446
RK
926static int mmci_get_ro(struct mmc_host *mmc)
927{
928 struct mmci_host *host = mmc_priv(mmc);
929
930 if (host->gpio_wp == -ENOSYS)
931 return -ENOSYS;
932
18a06301 933 return gpio_get_value_cansleep(host->gpio_wp);
89001446
RK
934}
935
936static int mmci_get_cd(struct mmc_host *mmc)
937{
938 struct mmci_host *host = mmc_priv(mmc);
29719445 939 struct mmci_platform_data *plat = host->plat;
89001446
RK
940 unsigned int status;
941
4b8caec0
RV
942 if (host->gpio_cd == -ENOSYS) {
943 if (!plat->status)
944 return 1; /* Assume always present */
945
29719445 946 status = plat->status(mmc_dev(host->mmc));
4b8caec0 947 } else
18a06301
LW
948 status = !!gpio_get_value_cansleep(host->gpio_cd)
949 ^ plat->cd_invert;
89001446 950
74bc8093
RK
951 /*
952 * Use positive logic throughout - status is zero for no card,
953 * non-zero for card inserted.
954 */
955 return status;
89001446
RK
956}
957
148b8b39
RV
/*
 * Card-detect GPIO interrupt: schedule a rescan of the slot after a
 * 500ms debounce delay.
 */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
966
/* MMC core host operations implemented by this driver */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
973
aa25afad
RK
974static int __devinit mmci_probe(struct amba_device *dev,
975 const struct amba_id *id)
1da177e4 976{
6ef297f8 977 struct mmci_platform_data *plat = dev->dev.platform_data;
4956e109 978 struct variant_data *variant = id->data;
1da177e4
LT
979 struct mmci_host *host;
980 struct mmc_host *mmc;
981 int ret;
982
983 /* must have platform data */
984 if (!plat) {
985 ret = -EINVAL;
986 goto out;
987 }
988
989 ret = amba_request_regions(dev, DRIVER_NAME);
990 if (ret)
991 goto out;
992
993 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
994 if (!mmc) {
995 ret = -ENOMEM;
996 goto rel_regions;
997 }
998
999 host = mmc_priv(mmc);
4ea580f1 1000 host->mmc = mmc;
012b7d33 1001
89001446
RK
1002 host->gpio_wp = -ENOSYS;
1003 host->gpio_cd = -ENOSYS;
148b8b39 1004 host->gpio_cd_irq = -1;
89001446 1005
012b7d33
RK
1006 host->hw_designer = amba_manf(dev);
1007 host->hw_revision = amba_rev(dev);
64de0289
LW
1008 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1009 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
012b7d33 1010
ee569c43 1011 host->clk = clk_get(&dev->dev, NULL);
1da177e4
LT
1012 if (IS_ERR(host->clk)) {
1013 ret = PTR_ERR(host->clk);
1014 host->clk = NULL;
1015 goto host_free;
1016 }
1017
1da177e4
LT
1018 ret = clk_enable(host->clk);
1019 if (ret)
a8d3584a 1020 goto clk_free;
1da177e4
LT
1021
1022 host->plat = plat;
4956e109 1023 host->variant = variant;
1da177e4 1024 host->mclk = clk_get_rate(host->clk);
c8df9a53
LW
1025 /*
1026 * According to the spec, mclk is max 100 MHz,
1027 * so we try to adjust the clock down to this,
1028 * (if possible).
1029 */
1030 if (host->mclk > 100000000) {
1031 ret = clk_set_rate(host->clk, 100000000);
1032 if (ret < 0)
1033 goto clk_disable;
1034 host->mclk = clk_get_rate(host->clk);
64de0289
LW
1035 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
1036 host->mclk);
c8df9a53 1037 }
c8ebae37 1038 host->phybase = dev->res.start;
dc890c2d 1039 host->base = ioremap(dev->res.start, resource_size(&dev->res));
1da177e4
LT
1040 if (!host->base) {
1041 ret = -ENOMEM;
1042 goto clk_disable;
1043 }
1044
1045 mmc->ops = &mmci_ops;
1046 mmc->f_min = (host->mclk + 511) / 512;
808d97cc
LW
1047 /*
1048 * If the platform data supplies a maximum operating
1049 * frequency, this takes precedence. Else, we fall back
1050 * to using the module parameter, which has a (low)
1051 * default value in case it is not specified. Either
1052 * value must not exceed the clock rate into the block,
1053 * of course.
1054 */
1055 if (plat->f_max)
1056 mmc->f_max = min(host->mclk, plat->f_max);
1057 else
1058 mmc->f_max = min(host->mclk, fmax);
64de0289
LW
1059 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
1060
34e84f39
LW
1061#ifdef CONFIG_REGULATOR
1062 /* If we're using the regulator framework, try to fetch a regulator */
1063 host->vcc = regulator_get(&dev->dev, "vmmc");
1064 if (IS_ERR(host->vcc))
1065 host->vcc = NULL;
1066 else {
1067 int mask = mmc_regulator_get_ocrmask(host->vcc);
1068
1069 if (mask < 0)
1070 dev_err(&dev->dev, "error getting OCR mask (%d)\n",
1071 mask);
1072 else {
1073 host->mmc->ocr_avail = (u32) mask;
1074 if (plat->ocr_mask)
1075 dev_warn(&dev->dev,
1076 "Provided ocr_mask/setpower will not be used "
1077 "(using regulator instead)\n");
1078 }
1079 }
1080#endif
1081 /* Fall back to platform data if no regulator is found */
1082 if (host->vcc == NULL)
1083 mmc->ocr_avail = plat->ocr_mask;
9e6c82cd 1084 mmc->caps = plat->capabilities;
1da177e4
LT
1085
1086 /*
1087 * We can do SGIO
1088 */
a36274e0 1089 mmc->max_segs = NR_SG;
1da177e4
LT
1090
1091 /*
08458ef6
RV
1092 * Since only a certain number of bits are valid in the data length
1093 * register, we must ensure that we don't exceed 2^num-1 bytes in a
1094 * single request.
1da177e4 1095 */
08458ef6 1096 mmc->max_req_size = (1 << variant->datalength_bits) - 1;
1da177e4
LT
1097
1098 /*
1099 * Set the maximum segment size. Since we aren't doing DMA
1100 * (yet) we are only limited by the data length register.
1101 */
55db890a 1102 mmc->max_seg_size = mmc->max_req_size;
1da177e4 1103
fe4a3c7a
PO
1104 /*
1105 * Block size can be up to 2048 bytes, but must be a power of two.
1106 */
1107 mmc->max_blk_size = 2048;
1108
55db890a
PO
1109 /*
1110 * No limit on the number of blocks transferred.
1111 */
1112 mmc->max_blk_count = mmc->max_req_size;
1113
1da177e4
LT
1114 spin_lock_init(&host->lock);
1115
1116 writel(0, host->base + MMCIMASK0);
1117 writel(0, host->base + MMCIMASK1);
1118 writel(0xfff, host->base + MMCICLEAR);
1119
89001446
RK
1120 if (gpio_is_valid(plat->gpio_cd)) {
1121 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
1122 if (ret == 0)
1123 ret = gpio_direction_input(plat->gpio_cd);
1124 if (ret == 0)
1125 host->gpio_cd = plat->gpio_cd;
1126 else if (ret != -ENOSYS)
1127 goto err_gpio_cd;
148b8b39
RV
1128
1129 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
1130 mmci_cd_irq, 0,
1131 DRIVER_NAME " (cd)", host);
1132 if (ret >= 0)
1133 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
89001446
RK
1134 }
1135 if (gpio_is_valid(plat->gpio_wp)) {
1136 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
1137 if (ret == 0)
1138 ret = gpio_direction_input(plat->gpio_wp);
1139 if (ret == 0)
1140 host->gpio_wp = plat->gpio_wp;
1141 else if (ret != -ENOSYS)
1142 goto err_gpio_wp;
1143 }
1144
4b8caec0
RV
1145 if ((host->plat->status || host->gpio_cd != -ENOSYS)
1146 && host->gpio_cd_irq < 0)
148b8b39
RV
1147 mmc->caps |= MMC_CAP_NEEDS_POLL;
1148
dace1453 1149 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
1da177e4
LT
1150 if (ret)
1151 goto unmap;
1152
2686b4b4
LW
1153 if (dev->irq[1] == NO_IRQ)
1154 host->singleirq = true;
1155 else {
1156 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
1157 DRIVER_NAME " (pio)", host);
1158 if (ret)
1159 goto irq0_free;
1160 }
1da177e4 1161
8cb28155 1162 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1da177e4
LT
1163
1164 amba_set_drvdata(dev, mmc);
1165
c8ebae37
RK
1166 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
1167 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
1168 amba_rev(dev), (unsigned long long)dev->res.start,
1169 dev->irq[0], dev->irq[1]);
1170
1171 mmci_dma_setup(host);
1da177e4 1172
8c11a94d
RK
1173 mmc_add_host(mmc);
1174
1da177e4
LT
1175 return 0;
1176
1177 irq0_free:
1178 free_irq(dev->irq[0], host);
1179 unmap:
89001446
RK
1180 if (host->gpio_wp != -ENOSYS)
1181 gpio_free(host->gpio_wp);
1182 err_gpio_wp:
148b8b39
RV
1183 if (host->gpio_cd_irq >= 0)
1184 free_irq(host->gpio_cd_irq, host);
89001446
RK
1185 if (host->gpio_cd != -ENOSYS)
1186 gpio_free(host->gpio_cd);
1187 err_gpio_cd:
1da177e4
LT
1188 iounmap(host->base);
1189 clk_disable:
1190 clk_disable(host->clk);
1da177e4
LT
1191 clk_free:
1192 clk_put(host->clk);
1193 host_free:
1194 mmc_free_host(mmc);
1195 rel_regions:
1196 amba_release_regions(dev);
1197 out:
1198 return ret;
1199}
1200
/*
 * Tear down an MMCI host on device removal.
 *
 * Order matters here: the host is removed from the MMC core first (so no
 * new requests arrive), the controller is then quiesced at the register
 * level, and only after that are IRQs, GPIOs, MMIO mapping, clocks and
 * the regulator released.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/* Detach from the MMC core before touching the hardware */
		mmc_remove_host(mmc);

		/* Mask all controller interrupt sources */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		/* Abort any in-flight command and data transfer */
		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		/* The second IRQ (PIO) only exists when not in single-IRQ mode */
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		/* gpio_* fields hold -ENOSYS when the line was never claimed */
		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		/* Power down the card's supply; regulator_put(NULL) is a no-op */
		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
1245
#ifdef CONFIG_PM
/*
 * Suspend: let the MMC core suspend the host, and on success mask
 * all controller interrupts so nothing fires while suspended.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	struct mmci_host *host;
	int ret;

	if (!mmc)
		return 0;

	host = mmc_priv(mmc);
	ret = mmc_suspend_host(mmc);
	if (ret == 0)
		writel(0, host->base + MMCIMASK0);

	return ret;
}

/*
 * Resume: re-enable the interrupt sources we rely on, then let the
 * MMC core bring the card stack back up.
 */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	struct mmci_host *host;

	if (!mmc)
		return 0;

	host = mmc_priv(mmc);
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	return mmc_resume_host(mmc);
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
1282
/*
 * AMBA peripheral ID table: maps PrimeCell part/revision IDs to the
 * variant_data describing each controller's quirks.
 */
static struct amba_id mmci_ids[] = {
	/* ARM PL180/PL181, standard FIFO */
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	/* ARM variant with an extended (larger) FIFO */
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },	/* sentinel */
};
1317
/* AMBA bus driver glue tying probe/remove/PM callbacks to the ID table. */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
1328
/* Module entry point: register the driver with the AMBA bus. */
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

/* Module exit point: unregister from the AMBA bus. */
static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
/* fmax: fallback maximum bus clock (Hz) when the platform supplies none */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
This page took 0.566172 seconds and 5 git commands to generate.