mmc: atmel-mci: change the state machine for compatibility with old IP
drivers/mmc/host/atmel-mci.c
/*
 * Atmel MultiMedia Card Interface driver
 *
 * Copyright (C) 2004-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>

#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>

#include <asm/io.h>
#include <asm/unaligned.h>

#include <mach/cpu.h>
#include <mach/board.h>

#include "atmel-mci-regs.h"

#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD	16

enum {
	EVENT_CMD_RDY = 0,
	EVENT_XFER_COMPLETE,
	EVENT_NOTBUSY,
	EVENT_DATA_ERROR,
};

enum atmel_mci_state {
	STATE_IDLE = 0,
	STATE_SENDING_CMD,
	STATE_DATA_XFER,
	STATE_WAITING_NOTBUSY,
	STATE_SENDING_STOP,
	STATE_END_REQUEST,
};
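
/*
 * Reading aid (not part of the original code): atmci_tasklet_func() below
 * drives these states. A request starts in STATE_SENDING_CMD; when the
 * command is ready it moves to STATE_DATA_XFER if data is attached, to
 * STATE_WAITING_NOTBUSY for busy-signalling commands, or straight to
 * STATE_END_REQUEST. A completed transfer goes to STATE_WAITING_NOTBUSY for
 * writes, to STATE_SENDING_STOP for reads that carry a stop command, or
 * directly to STATE_END_REQUEST, and STATE_END_REQUEST always returns the
 * host to STATE_IDLE.
 */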

enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};

enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};

struct atmel_mci_caps {
	bool	has_dma;
	bool	has_pdc;
	bool	has_cfg_reg;
	bool	has_cstor_reg;
	bool	has_highspeed;
	bool	has_rwproof;
	bool	has_odd_clk_div;
};

struct atmel_mci_dma {
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*data_desc;
};

/**
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 * @pio_offset: Offset into the current scatterlist entry.
 * @buffer: Buffer used if we don't have the r/w proof capability. We
 *	don't have the time to switch pdc buffers so we have to use only
 *	one buffer for the full transaction.
 * @buf_size: size of the buffer.
 * @buf_phys_addr: buffer address needed for pdc.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL if no data
 *	transfer is in progress.
 * @data_size: just data->blocks * data->blksz.
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
 * @data_status: Snapshot of SR taken upon completion of the current
 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
 *	EVENT_DATA_ERROR is pending.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @mode_reg: Value of the MR register.
 * @cfg_reg: Value of the CFG register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 * @caps: MCI capabilities depending on MCI version.
 * @prepare_data: function to setup MCI before data transfer which
 *	depends on MCI capabilities.
 * @submit_data: function to start data transfer which depends on MCI
 *	capabilities.
 * @stop_transfer: function to stop data transfer which depends on MCI
 *	capabilities.
 *
 * Locking
 * =======
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 *
 * @lock also protects mode_reg and need_clock_update since these are
 * used to synchronize mode register updates with the queue
 * processing.
 *
 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 * and must always be written at the same time as the slot is added to
 * @queue.
 *
 * @pending_events and @completed_events are accessed using atomic bit
 * operations, so they don't need any locking.
 *
 * None of the fields touched by the interrupt handler need any
 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
 * interrupts must be disabled and @data_status updated with a
 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 * bytes_xfered field of @data must be written. This is ensured by
 * using barriers.
 */
struct atmel_mci {
	spinlock_t		lock;
	void __iomem		*regs;

	struct scatterlist	*sg;
	unsigned int		pio_offset;
	unsigned int		*buffer;
	unsigned int		buf_size;
	dma_addr_t		buf_phys_addr;

	struct atmel_mci_slot	*cur_slot;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	unsigned int		data_size;

	struct atmel_mci_dma	dma;
	struct dma_chan		*data_chan;
	struct dma_slave_config	dma_conf;

	u32			cmd_status;
	u32			data_status;
	u32			stop_cmdr;

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;
	enum atmel_mci_state	state;
	struct list_head	queue;

	bool			need_clock_update;
	bool			need_reset;
	u32			mode_reg;
	u32			cfg_reg;
	unsigned long		bus_hz;
	unsigned long		mapbase;
	struct clk		*mck;
	struct platform_device	*pdev;

	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps	caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};

/**
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO irq mask for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 *	if not available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */
struct atmel_mci_slot {
	struct mmc_host		*mmc;
	struct atmel_mci	*host;

	u32			sdc_reg;
	u32			sdio_irq;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2
#define ATMCI_SUSPENDED		3

	int			detect_pin;
	int			wp_pin;
	bool			detect_is_active_high;

	struct timer_list	detect_timer;
};

#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &host->pending_events)

/*
 * The debugfs stuff below is mostly optimized away when
 * CONFIG_DEBUG_FS is not set.
 */
static int atmci_req_show(struct seq_file *s, void *v)
{
	struct atmel_mci_slot	*slot = s->private;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_command	*stop;
	struct mmc_data		*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				cmd->opcode, cmd->arg, cmd->flags,
				cmd->resp[0], cmd->resp[1], cmd->resp[2],
				cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				data->bytes_xfered, data->blocks,
				data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				stop->opcode, stop->arg, stop->flags,
				stop->resp[0], stop->resp[1], stop->resp[2],
				stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int atmci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_req_show, inode->i_private);
}

static const struct file_operations atmci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void atmci_show_status_reg(struct seq_file *s,
		const char *regname, u32 value)
{
	static const char	*sr_bit[] = {
		[0]	= "CMDRDY",
		[1]	= "RXRDY",
		[2]	= "TXRDY",
		[3]	= "BLKE",
		[4]	= "DTIP",
		[5]	= "NOTBUSY",
		[6]	= "ENDRX",
		[7]	= "ENDTX",
		[8]	= "SDIOIRQA",
		[9]	= "SDIOIRQB",
		[12]	= "SDIOWAIT",
		[14]	= "RXBUFF",
		[15]	= "TXBUFE",
		[16]	= "RINDE",
		[17]	= "RDIRE",
		[18]	= "RCRCE",
		[19]	= "RENDE",
		[20]	= "RTOE",
		[21]	= "DCRCE",
		[22]	= "DTOE",
		[23]	= "CSTOE",
		[24]	= "BLKOVRE",
		[25]	= "DMADONE",
		[26]	= "FIFOEMPTY",
		[27]	= "XFRDONE",
		[30]	= "OVRE",
		[31]	= "UNRE",
	};
	unsigned int		i;

	seq_printf(s, "%s:\t0x%08x", regname, value);
	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
		if (value & (1 << i)) {
			if (sr_bit[i])
				seq_printf(s, " %s", sr_bit[i]);
			else
				seq_puts(s, " UNKNOWN");
		}
	}
	seq_putc(s, '\n');
}

static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci	*host = s->private;
	u32			*buf;

	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent.
	 */
	spin_lock_bh(&host->lock);
	clk_enable(host->mck);
	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_bh(&host->lock);

	seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
			buf[ATMCI_MR / 4],
			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "",
			buf[ATMCI_MR / 4] & 0xff);
	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
			buf[ATMCI_BLKR / 4],
			buf[ATMCI_BLKR / 4] & 0xffff,
			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
	if (host->caps.has_cstor_reg)
		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* Don't read RSPR and RDR; it will consume the data there */

	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);

	if (host->caps.has_dma) {
		u32 val;

		val = buf[ATMCI_DMA / 4];
		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
				val, val & 3,
				((val >> 4) & 3) ?
					1 << (((val >> 4) & 3) + 1) : 1,
				val & ATMCI_DMAEN ? " DMAEN" : "");
	}
	if (host->caps.has_cfg_reg) {
		u32 val;

		val = buf[ATMCI_CFG / 4];
		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
				val,
				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
				val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
				val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
	}

	kfree(buf);

	return 0;
}

static int atmci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_regs_show, inode->i_private);
}

static const struct file_operations atmci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
	struct mmc_host		*mmc = slot->mmc;
	struct atmel_mci	*host = slot->host;
	struct dentry		*root;
	struct dentry		*node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
			&atmci_regs_fops);
	if (IS_ERR(node))
		return;
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
			(u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
			(u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}

static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}
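
/*
 * Note (added for clarity): the 12-bit version field read above is what the
 * rest of the driver (not shown in this excerpt) uses to choose the
 * capability flags in struct atmel_mci_caps, so older MCI revisions fall
 * back to the PDC or PIO paths while newer ones can use DMA, the CFG
 * register, high-speed mode, and so on.
 */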

static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
					unsigned int ns)
{
	/*
	 * It is easier here to use us instead of ns for the timeout;
	 * it prevents overflows during the calculation.
	 */
	unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum clock frequency is host->bus_hz/2 */
	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}

static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	static unsigned	dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned	timeout;
	unsigned	dtocyc;
	unsigned	dtomul;

	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
		+ data->timeout_clks;

	for (dtomul = 0; dtomul < 8; dtomul++) {
		unsigned shift = dtomul_to_shift[dtomul];
		dtocyc = (timeout + (1 << shift) - 1) >> shift;
		if (dtocyc < 15)
			break;
	}

	if (dtomul >= 8) {
		dtomul = 7;
		dtocyc = 15;
	}

	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
			dtocyc << dtomul_to_shift[dtomul]);
	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
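
/*
 * Worked example (illustration only): with bus_hz = 50 MHz the card clock is
 * at most 25 MHz, so a 100 ms (100,000,000 ns) timeout becomes 100,000 us and
 * then 100,000 * 25 = 2,500,000 clock cycles in atmci_ns_to_clocks(). The
 * loop above then picks the smallest DTOMUL whose shift brings the count
 * below 15: 2,500,000 >> 20 rounds up to 3, so DTOR is programmed with
 * DTOMUL = 7 (multiplier 1048576) and DTOCYC = 3, i.e. about 3.1 million
 * cycles.
 */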

/*
 * Return mask with command flags to be enabled for this command.
 */
static u32 atmci_prepare_command(struct mmc_host *mmc,
				 struct mmc_command *cmd)
{
	struct mmc_data	*data;
	u32		cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
		else
			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
	}

	/*
	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
	 * it's too difficult to determine whether this is an ACMD or
	 * not. Better make it 64.
	 */
	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;

	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= ATMCI_CMDR_OPDCMD;

	data = cmd->data;
	if (data) {
		cmdr |= ATMCI_CMDR_START_XFER;

		if (cmd->opcode == SD_IO_RW_EXTENDED) {
			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
		} else {
			if (data->flags & MMC_DATA_STREAM)
				cmdr |= ATMCI_CMDR_STREAM;
			else if (data->blocks > 1)
				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
			else
				cmdr |= ATMCI_CMDR_BLOCK;
		}

		if (data->flags & MMC_DATA_READ)
			cmdr |= ATMCI_CMDR_TRDIR_READ;
	}

	return cmdr;
}
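
/*
 * Illustration (not from the original source): for a multi-block read such as
 * CMD18 with an R1 response, the mask built above ends up as
 * ATMCI_CMDR_CMDNB(18) | ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC |
 * ATMCI_CMDR_START_XFER | ATMCI_CMDR_MULTI_BLOCK | ATMCI_CMDR_TRDIR_READ
 * (plus ATMCI_CMDR_OPDCMD when the bus is in open-drain mode).
 */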

static void atmci_send_command(struct atmel_mci *host,
		struct mmc_command *cmd, u32 cmd_flags)
{
	WARN_ON(host->cmd);
	host->cmd = cmd;

	dev_vdbg(&host->pdev->dev,
		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	atmci_writel(host, ATMCI_ARGR, cmd->arg);
	atmci_writel(host, ATMCI_CMDR, cmd_flags);
}

static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
	atmci_send_command(host, data->stop, host->stop_cmdr);
	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}

/*
 * Configure the given PDC buffer, taking care of alignment issues.
 * Update host->data_size and host->sg.
 */
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
	u32 pointer_reg, counter_reg;
	unsigned int buf_size;

	if (dir == XFER_RECEIVE) {
		pointer_reg = ATMEL_PDC_RPR;
		counter_reg = ATMEL_PDC_RCR;
	} else {
		pointer_reg = ATMEL_PDC_TPR;
		counter_reg = ATMEL_PDC_TCR;
	}

	if (buf_nb == PDC_SECOND_BUF) {
		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
	}

	if (!host->caps.has_rwproof) {
		buf_size = host->buf_size;
		atmci_writel(host, pointer_reg, host->buf_phys_addr);
	} else {
		buf_size = sg_dma_len(host->sg);
		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
	}

	if (host->data_size <= buf_size) {
		if (host->data_size & 0x3) {
			/* If size is not a multiple of 4, transfer bytes */
			atmci_writel(host, counter_reg, host->data_size);
			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
		} else {
			/* Else transfer 32-bit words */
			atmci_writel(host, counter_reg, host->data_size / 4);
		}
		host->data_size = 0;
	} else {
		/* We assume the size of a page is 32-bit aligned */
		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
		host->data_size -= sg_dma_len(host->sg);
		if (host->data_size)
			host->sg = sg_next(host->sg);
	}
}
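
/*
 * Worked example (illustration only): a 6-byte transfer is not a multiple of
 * 4, so the counter register is programmed with 6 and ATMCI_MR_PDCFBYTE puts
 * the PDC in byte mode; a 512-byte block stays in word mode and the counter
 * is programmed with 512 / 4 = 128 words.
 */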

/*
 * Configure the PDC buffers according to the data size, i.e. configure one or
 * two buffers. Don't use this function if you want to configure only the
 * second buffer. In this case, use atmci_pdc_set_single_buf.
 */
static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
	if (host->data_size)
		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}

/*
 * Unmap sg lists, called when transfer is finished.
 */
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
	struct mmc_data		*data = host->data;

	if (data)
		dma_unmap_sg(&host->pdev->dev,
				data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}

/*
 * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
 * having received the ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable the
 * ATMCI_NOTBUSY interrupt, which is needed for both transfer directions.
 */
static void atmci_pdc_complete(struct atmel_mci *host)
{
	int transfer_size = host->data->blocks * host->data->blksz;

	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_READ))
		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
				host->buffer, transfer_size);

	atmci_pdc_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (host->data) {
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);
	}
}
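
/*
 * Note (added for clarity): on controllers without the read/write proof
 * capability the PDC transfers through the single bounce buffer referenced by
 * host->buffer, which is why a finished read is copied back into the
 * scatterlist above before the sg mapping is torn down, and why a write is
 * copied into that buffer in atmci_prepare_data_pdc() below.
 */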

static void atmci_dma_cleanup(struct atmel_mci *host)
{
	struct mmc_data		*data = host->data;

	if (data)
		dma_unmap_sg(host->dma.chan->device->dev,
				data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}

/*
 * This function is called by the DMA driver from tasklet context.
 */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci	*host = arg;
	struct mmc_data		*data = host->data;

	dev_vdbg(&host->pdev->dev, "DMA complete\n");

	if (host->caps.has_dma)
		/* Disable DMA hardware handshaking on MCI */
		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);

	atmci_dma_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * tasklet to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations from the
		 * completion callback" rule of the dma engine
		 * framework.
		 */
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}

/*
 * Returns a mask of interrupt flags to be enabled after the whole
 * request has been prepared.
 */
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags;

	data->error = -EINPROGRESS;

	host->sg = data->sg;
	host->data = data;
	host->data_chan = NULL;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Errata: MMC data write operation with less than 12
	 * bytes is impossible.
	 *
	 * Errata: MCI Transmit Data Register (TDR) FIFO
	 * corruption when length is not multiple of 4.
	 */
	if (data->blocks * data->blksz < 12
			|| (data->blocks * data->blksz) & 3)
		host->need_reset = true;

	host->pio_offset = 0;
	if (data->flags & MMC_DATA_READ)
		iflags |= ATMCI_RXRDY;
	else
		iflags |= ATMCI_TXRDY;

	return iflags;
}

/*
 * Set the interrupt flags and load the block length into the MCI mode
 * register even though this value is also accessible through the MCI block
 * register. It seems to be necessary before the High Speed MCI version. This
 * also maps the sg list and configures the PDC registers.
 */
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags, tmp;
	unsigned int sg_len;
	enum dma_data_direction dir;

	data->error = -EINPROGRESS;

	host->data = data;
	host->sg = data->sg;
	iflags = ATMCI_DATA_ERROR_FLAGS;

	/* Enable pdc mode */
	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
	} else {
		dir = DMA_TO_DEVICE;
		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
	}

	/* Set BLKLEN */
	tmp = atmci_readl(host, ATMCI_MR);
	tmp &= 0x0000ffff;
	tmp |= ATMCI_BLKLEN(data->blksz);
	atmci_writel(host, ATMCI_MR, tmp);

	/* Configure PDC */
	host->data_size = data->blocks * data->blksz;
	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_WRITE))
		sg_copy_to_buffer(host->data->sg, host->data->sg_len,
				host->buffer, host->data_size);

	if (host->data_size)
		atmci_pdc_set_both_buf(host,
			((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));

	return iflags;
}

static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc;
	struct scatterlist		*sg;
	unsigned int			i;
	enum dma_data_direction		direction;
	enum dma_transfer_direction	slave_dirn;
	unsigned int			sglen;
	u32 iflags;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
		return atmci_prepare_data(host, data);
	if (data->blksz & 3)
		return atmci_prepare_data(host, data);

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return atmci_prepare_data(host, data);
	}

	/* If we don't have a channel, we can't do DMA */
	chan = host->dma.chan;
	if (chan)
		host->data_chan = chan;

	if (!chan)
		return -ENODEV;

	if (host->caps.has_dma)
		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);

	if (data->flags & MMC_DATA_READ) {
		direction = DMA_FROM_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
	} else {
		direction = DMA_TO_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
	}

	sglen = dma_map_sg(chan->device->dev, data->sg,
			data->sg_len, direction);

	dmaengine_slave_config(chan, &host->dma_conf);
	desc = dmaengine_prep_slave_sg(chan,
			data->sg, sglen, slave_dirn,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	host->dma.data_desc = desc;
	desc->callback = atmci_dma_complete;
	desc->callback_param = host;

	return iflags;
unmap_exit:
	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
	return -ENOMEM;
}
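
/*
 * Note (added for clarity): the checks above mean DMA is only used for
 * transfers of at least ATMCI_DMA_THRESHOLD (16) bytes whose block size and
 * scatterlist offsets/lengths are all word aligned; anything else silently
 * falls back to the PIO setup done by atmci_prepare_data().
 */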

static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
	return;
}

/*
 * Start PDC according to transfer direction.
 */
static void
atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ)
		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	else
		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}

static void
atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan = host->data_chan;
	struct dma_async_tx_descriptor	*desc = host->dma.data_desc;

	if (chan) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
}

static void atmci_stop_transfer(struct atmel_mci *host)
{
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}

/*
 * Stop data transfer because error(s) occurred.
 */
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}

static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
	struct dma_chan *chan = host->data_chan;

	if (chan) {
		dmaengine_terminate_all(chan);
		atmci_dma_cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}

/*
 * Start a request: prepare data if needed, prepare the command and activate
 * interrupts.
 */
static void atmci_start_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot)
{
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	u32			iflags;
	u32			cmdflags;

	mrq = slot->mrq;
	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;

	if (host->need_reset) {
		iflags = atmci_readl(host, ATMCI_IMR);
		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		atmci_writel(host, ATMCI_IER, iflags);
		host->need_reset = false;
	}
	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);

	iflags = atmci_readl(host, ATMCI_IMR);
	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
				iflags);

	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
			cpu_relax();
	}
	iflags = 0;
	data = mrq->data;
	if (data) {
		atmci_set_timeout(host, slot, data);

		/* Must set block count/size before sending command */
		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
				| ATMCI_BLKLEN(data->blksz));
		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));

		iflags |= host->prepare_data(host, data);
	}

	iflags |= ATMCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(slot->mmc, cmd);
	atmci_send_command(host, cmd, cmdflags);

	if (data)
		host->submit_data(host, data);

	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
		if (data->flags & MMC_DATA_STREAM)
			host->stop_cmdr |= ATMCI_CMDR_STREAM;
		else
			host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
	}

	/*
	 * We could have enabled interrupts earlier, but I suspect
	 * that would open up a nice can of interesting race
	 * conditions (e.g. command and data complete, but stop not
	 * prepared yet.)
	 */
	atmci_writel(host, ATMCI_IER, iflags);
}
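
/*
 * Note (added for clarity, based on how the driver is structured rather than
 * on code shown in this excerpt): the prepare_data/submit_data/stop_transfer
 * hooks used by atmci_start_request() are expected to be wired up at probe
 * time to the _dma, _pdc or plain PIO variants defined above, according to
 * the capability flags in host->caps.
 */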

static void atmci_queue_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
			host->state);

	spin_lock_bh(&host->lock);
	slot->mrq = mrq;
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		atmci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
	spin_unlock_bh(&host->lock);
}

static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct atmel_mci_slot	*slot = mmc_priv(mmc);
	struct atmel_mci	*host = slot->host;
	struct mmc_data		*data;

	WARN_ON(slot->mrq);

	/*
	 * We may "know" the card is gone even though there's still an
	 * electrical connection. If so, we really need to communicate
	 * this to the MMC core since there won't be any more
	 * interrupts as the card is completely removed. Otherwise,
	 * the MMC core might believe the card is still there even
	 * though the card was just removed very slowly.
	 */
	if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	/* We don't support multiple blocks of weird lengths. */
	data = mrq->data;
	if (data && data->blocks > 1 && data->blksz & 3) {
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
	}

	atmci_queue_request(host, slot, mrq);
}

static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct atmel_mci_slot	*slot = mmc_priv(mmc);
	struct atmel_mci	*host = slot->host;
	unsigned int		i;

	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
		break;
	case MMC_BUS_WIDTH_4:
		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
		break;
	}

	if (ios->clock) {
		unsigned int clock_min = ~0U;
		u32 clkdiv;

		spin_lock_bh(&host->lock);
		if (!host->mode_reg) {
			clk_enable(host->mck);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		}

		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
		slot->clock = ios->clock;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock
					&& host->slot[i]->clock < clock_min)
				clock_min = host->slot[i]->clock;
		}

		/* Calculate clock divider */
		if (host->caps.has_odd_clk_div) {
			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
			if (clkdiv > 511) {
				dev_warn(&mmc->class_dev,
					"clock %u too slow; using %lu\n",
					clock_min, host->bus_hz / (511 + 2));
				clkdiv = 511;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
					| ATMCI_MR_CLKODD(clkdiv & 1);
		} else {
			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
			if (clkdiv > 255) {
				dev_warn(&mmc->class_dev,
					"clock %u too slow; using %lu\n",
					clock_min, host->bus_hz / (2 * 256));
				clkdiv = 255;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
		}
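
		/*
		 * Worked example (illustration only): with bus_hz = 100 MHz
		 * and a requested minimum of 25 MHz, the odd-divider variant
		 * programs clkdiv = 100/25 - 2 = 2, giving
		 * f = bus_hz / (clkdiv + 2) = 25 MHz, while the classic
		 * variant programs clkdiv = 100/50 - 1 = 1, giving
		 * f = bus_hz / (2 * (clkdiv + 1)) = 25 MHz.
		 */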

		/*
		 * WRPROOF and RDPROOF prevent overruns/underruns by
		 * stopping the clock when the FIFO is full/empty.
		 * This state is not expected to last for long.
		 */
		if (host->caps.has_rwproof)
			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);

		if (host->caps.has_cfg_reg) {
			/* setup High Speed mode in relation with card capability */
			if (ios->timing == MMC_TIMING_SD_HS)
				host->cfg_reg |= ATMCI_CFG_HSMODE;
			else
				host->cfg_reg &= ~ATMCI_CFG_HSMODE;
		}

		if (list_empty(&host->queue)) {
			atmci_writel(host, ATMCI_MR, host->mode_reg);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		} else {
			host->need_clock_update = true;
		}

		spin_unlock_bh(&host->lock);
	} else {
		bool any_slot_active = false;

		spin_lock_bh(&host->lock);
		slot->clock = 0;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock) {
				any_slot_active = true;
				break;
			}
		}
		if (!any_slot_active) {
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
			if (host->mode_reg) {
				atmci_readl(host, ATMCI_MR);
				clk_disable(host->mck);
			}
			host->mode_reg = 0;
		}
		spin_unlock_bh(&host->lock);
	}

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		/*
		 * TODO: None of the currently available AVR32-based
		 * boards allow MMC power to be turned off. Implement
		 * power control when this can be tested properly.
		 *
		 * We also need to hook this into the clock management
		 * somehow so that newly inserted cards aren't
		 * subjected to a fast clock before we have a chance
		 * to figure out what the maximum rate is. Currently,
		 * there's no way to avoid this, and there never will
		 * be for boards that don't support power control.
		 */
		break;
	}
}

static int atmci_get_ro(struct mmc_host *mmc)
{
	int			read_only = -ENOSYS;
	struct atmel_mci_slot	*slot = mmc_priv(mmc);

	if (gpio_is_valid(slot->wp_pin)) {
		read_only = gpio_get_value(slot->wp_pin);
		dev_dbg(&mmc->class_dev, "card is %s\n",
				read_only ? "read-only" : "read-write");
	}

	return read_only;
}

static int atmci_get_cd(struct mmc_host *mmc)
{
	int			present = -ENOSYS;
	struct atmel_mci_slot	*slot = mmc_priv(mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		present = !(gpio_get_value(slot->detect_pin) ^
				slot->detect_is_active_high);
		dev_dbg(&mmc->class_dev, "card is %spresent\n",
				present ? "" : "not ");
	}

	return present;
}

static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct atmel_mci_slot	*slot = mmc_priv(mmc);
	struct atmel_mci	*host = slot->host;

	if (enable)
		atmci_writel(host, ATMCI_IER, slot->sdio_irq);
	else
		atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
}

static const struct mmc_host_ops atmci_ops = {
	.request	= atmci_request,
	.set_ios	= atmci_set_ios,
	.get_ro		= atmci_get_ro,
	.get_cd		= atmci_get_cd,
	.enable_sdio_irq = atmci_enable_sdio_irq,
};
1308
965ebf33
HS
1309/* Called with host->lock held */
1310static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1311 __releases(&host->lock)
1312 __acquires(&host->lock)
1313{
1314 struct atmel_mci_slot *slot = NULL;
1315 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1316
1317 WARN_ON(host->cmd || host->data);
1318
1319 /*
1320 * Update the MMC clock rate if necessary. This may be
1321 * necessary if set_ios() is called when a different slot is
25985edc 1322 * busy transferring data.
965ebf33 1323 */
99ddffd8 1324 if (host->need_clock_update) {
03fc9a7f 1325 atmci_writel(host, ATMCI_MR, host->mode_reg);
796211b7 1326 if (host->caps.has_cfg_reg)
03fc9a7f 1327 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
99ddffd8 1328 }
965ebf33
HS
1329
1330 host->cur_slot->mrq = NULL;
1331 host->mrq = NULL;
1332 if (!list_empty(&host->queue)) {
1333 slot = list_entry(host->queue.next,
1334 struct atmel_mci_slot, queue_node);
1335 list_del(&slot->queue_node);
1336 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1337 mmc_hostname(slot->mmc));
1338 host->state = STATE_SENDING_CMD;
1339 atmci_start_request(host, slot);
1340 } else {
1341 dev_vdbg(&host->pdev->dev, "list empty\n");
1342 host->state = STATE_IDLE;
1343 }
1344
1345 spin_unlock(&host->lock);
1346 mmc_request_done(prev_mmc, mrq);
1347 spin_lock(&host->lock);
1348}
1349
7d2be074 1350static void atmci_command_complete(struct atmel_mci *host,
c06ad258 1351 struct mmc_command *cmd)
7d2be074 1352{
c06ad258
HS
1353 u32 status = host->cmd_status;
1354
7d2be074 1355 /* Read the response from the card (up to 16 bytes) */
03fc9a7f
LD
1356 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1357 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1358 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1359 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
7d2be074 1360
2c96a293 1361 if (status & ATMCI_RTOE)
7d2be074 1362 cmd->error = -ETIMEDOUT;
2c96a293 1363 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
7d2be074 1364 cmd->error = -EILSEQ;
2c96a293 1365 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
7d2be074
HS
1366 cmd->error = -EIO;
1367 else
1368 cmd->error = 0;
7d2be074
HS
1369}
1370
1371static void atmci_detect_change(unsigned long data)
1372{
965ebf33
HS
1373 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
1374 bool present;
1375 bool present_old;
7d2be074
HS
1376
1377 /*
965ebf33
HS
1378 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1379 * freeing the interrupt. We must not re-enable the interrupt
1380 * if it has been freed, and if we're shutting down, it
1381 * doesn't really matter whether the card is present or not.
7d2be074
HS
1382 */
1383 smp_rmb();
965ebf33 1384 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
7d2be074
HS
1385 return;
1386
965ebf33 1387 enable_irq(gpio_to_irq(slot->detect_pin));
1c1452be
JL
1388 present = !(gpio_get_value(slot->detect_pin) ^
1389 slot->detect_is_active_high);
965ebf33 1390 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
7d2be074 1391
965ebf33
HS
1392 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1393 present, present_old);
7d2be074 1394
965ebf33
HS
1395 if (present != present_old) {
1396 struct atmel_mci *host = slot->host;
1397 struct mmc_request *mrq;
1398
1399 dev_dbg(&slot->mmc->class_dev, "card %s\n",
7d2be074 1400 present ? "inserted" : "removed");
7d2be074 1401
965ebf33
HS
1402 spin_lock(&host->lock);
1403
1404 if (!present)
1405 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1406 else
1407 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
7d2be074
HS
1408
1409 /* Clean up queue if present */
965ebf33 1410 mrq = slot->mrq;
7d2be074 1411 if (mrq) {
965ebf33
HS
1412 if (mrq == host->mrq) {
1413 /*
1414 * Reset controller to terminate any ongoing
1415 * commands or data transfers.
1416 */
03fc9a7f
LD
1417 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1418 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1419 atmci_writel(host, ATMCI_MR, host->mode_reg);
796211b7 1420 if (host->caps.has_cfg_reg)
03fc9a7f 1421 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
965ebf33
HS
1422
1423 host->data = NULL;
1424 host->cmd = NULL;
1425
1426 switch (host->state) {
1427 case STATE_IDLE:
c06ad258 1428 break;
965ebf33
HS
1429 case STATE_SENDING_CMD:
1430 mrq->cmd->error = -ENOMEDIUM;
f5177547
LD
1431 if (mrq->data)
1432 host->stop_transfer(host);
1433 break;
1434 case STATE_DATA_XFER:
c06ad258 1435 mrq->data->error = -ENOMEDIUM;
796211b7 1436 host->stop_transfer(host);
c06ad258 1437 break;
f5177547
LD
1438 case STATE_WAITING_NOTBUSY:
1439 mrq->data->error = -ENOMEDIUM;
1440 break;
965ebf33
HS
1441 case STATE_SENDING_STOP:
1442 mrq->stop->error = -ENOMEDIUM;
1443 break;
f5177547
LD
1444 case STATE_END_REQUEST:
1445 break;
965ebf33 1446 }
7d2be074 1447
965ebf33
HS
1448 atmci_request_end(host, mrq);
1449 } else {
1450 list_del(&slot->queue_node);
1451 mrq->cmd->error = -ENOMEDIUM;
1452 if (mrq->data)
1453 mrq->data->error = -ENOMEDIUM;
1454 if (mrq->stop)
1455 mrq->stop->error = -ENOMEDIUM;
1456
1457 spin_unlock(&host->lock);
1458 mmc_request_done(slot->mmc, mrq);
1459 spin_lock(&host->lock);
1460 }
7d2be074 1461 }
965ebf33 1462 spin_unlock(&host->lock);
7d2be074 1463
965ebf33 1464 mmc_detect_change(slot->mmc, 0);
7d2be074
HS
1465 }
1466}

static void atmci_tasklet_func(unsigned long priv)
{
	struct atmel_mci	*host = (struct atmel_mci *)priv;
	struct mmc_request	*mrq = host->mrq;
	struct mmc_data		*data = host->data;
	enum atmel_mci_state	state = host->state;
	enum atmel_mci_state	prev_state;
	u32			status;

	spin_lock(&host->lock);

	state = host->state;

	dev_vdbg(&host->pdev->dev,
		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
		state, host->pending_events, host->completed_events,
		atmci_readl(host, ATMCI_IMR));

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			/*
			 * Command has been sent, we are waiting for command
			 * ready. Then we have three next states possible:
			 * END_REQUEST by default, WAITING_NOTBUSY if it's a
			 * command needing it or DATA_XFER if there is data.
			 */
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_RDY))
				break;

			host->cmd = NULL;
			atmci_set_completed(host, EVENT_CMD_RDY);
			atmci_command_complete(host, mrq->cmd);
			if (mrq->data) {
				/*
				 * If there is a command error don't start
				 * data transfer.
				 */
				if (mrq->cmd->error) {
					host->stop_transfer(host);
					host->data = NULL;
					atmci_writel(host, ATMCI_IDR,
						ATMCI_TXRDY | ATMCI_RXRDY
						| ATMCI_DATA_ERROR_FLAGS);
					state = STATE_END_REQUEST;
				} else
					state = STATE_DATA_XFER;
			} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			} else
				state = STATE_END_REQUEST;

			break;

		case STATE_DATA_XFER:
			if (atmci_test_and_clear_pending(host,
						EVENT_DATA_ERROR)) {
				atmci_set_completed(host, EVENT_DATA_ERROR);
				state = STATE_END_REQUEST;
				break;
			}

			/*
			 * A data transfer is in progress. The event expected
			 * to move to the next state depends on the data
			 * transfer type (PDC or DMA). Once the transfer is
			 * done we can move to the next step, which is
			 * WAITING_NOTBUSY in the write case and directly
			 * SENDING_STOP in the read case.
			 */
			if (!atmci_test_and_clear_pending(host,
						EVENT_XFER_COMPLETE))
				break;

			atmci_set_completed(host, EVENT_XFER_COMPLETE);

			if (host->data->flags & MMC_DATA_WRITE) {
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			} else if (host->mrq->stop) {
				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
				atmci_send_stop_cmd(host, data);
				state = STATE_SENDING_STOP;
			} else {
				host->data = NULL;
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
				state = STATE_END_REQUEST;
			}
			break;

		case STATE_WAITING_NOTBUSY:
			/*
			 * We can be in this state for two reasons: a command
			 * requiring waiting on the not-busy signal (stop
			 * command included) or a write operation. In the
			 * latter case, we need to send a stop command.
			 */
			if (!atmci_test_and_clear_pending(host,
						EVENT_NOTBUSY))
				break;

			atmci_set_completed(host, EVENT_NOTBUSY);

			if (host->data) {
				/*
				 * For some commands such as CMD53, even if
				 * there is data transfer, there is no stop
				 * command to send.
				 */
				if (host->mrq->stop) {
					atmci_writel(host, ATMCI_IER,
							ATMCI_CMDRDY);
					atmci_send_stop_cmd(host, data);
					state = STATE_SENDING_STOP;
				} else {
					host->data = NULL;
					data->bytes_xfered = data->blocks
							* data->blksz;
					data->error = 0;
					state = STATE_END_REQUEST;
				}
			} else
				state = STATE_END_REQUEST;
			break;

		case STATE_SENDING_STOP:
			/*
			 * In this state, it is important to set host->data to
			 * NULL (which is tested in the waiting notbusy state)
			 * in order to go to the end request state instead of
			 * sending stop again.
			 */
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_RDY))
				break;

			host->cmd = NULL;
			host->data = NULL;
			data->bytes_xfered = data->blocks * data->blksz;
			data->error = 0;
			atmci_command_complete(host, mrq->stop);
			if (mrq->stop->error) {
				host->stop_transfer(host);
				atmci_writel(host, ATMCI_IDR,
					ATMCI_TXRDY | ATMCI_RXRDY
					| ATMCI_DATA_ERROR_FLAGS);
				state = STATE_END_REQUEST;
			} else {
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			}
			break;

		case STATE_END_REQUEST:
			atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
					| ATMCI_DATA_ERROR_FLAGS);
			status = host->data_status;
			if (unlikely(status)) {
				host->stop_transfer(host);
				host->data = NULL;
				if (status & ATMCI_DTOE) {
					data->error = -ETIMEDOUT;
				} else if (status & ATMCI_DCRCE) {
					data->error = -EILSEQ;
				} else {
					data->error = -EIO;
				}
			}

			atmci_request_end(host, host->mrq);
			state = STATE_IDLE;
			break;
		}
	} while (state != prev_state);

	host->state = state;

	spin_unlock(&host->lock);
}
1654
1655static void atmci_read_data_pio(struct atmel_mci *host)
1656{
1657 struct scatterlist *sg = host->sg;
1658 void *buf = sg_virt(sg);
1659 unsigned int offset = host->pio_offset;
1660 struct mmc_data *data = host->data;
1661 u32 value;
1662 u32 status;
1663 unsigned int nbytes = 0;
1664
1665 do {
03fc9a7f 1666 value = atmci_readl(host, ATMCI_RDR);
7d2be074
HS
1667 if (likely(offset + 4 <= sg->length)) {
1668 put_unaligned(value, (u32 *)(buf + offset));
1669
1670 offset += 4;
1671 nbytes += 4;
1672
1673 if (offset == sg->length) {
5e7184ae 1674 flush_dcache_page(sg_page(sg));
7d2be074
HS
1675 host->sg = sg = sg_next(sg);
1676 if (!sg)
1677 goto done;
1678
1679 offset = 0;
1680 buf = sg_virt(sg);
1681 }
1682 } else {
1683 unsigned int remaining = sg->length - offset;
1684 memcpy(buf + offset, &value, remaining);
1685 nbytes += remaining;
1686
1687 flush_dcache_page(sg_page(sg));
1688 host->sg = sg = sg_next(sg);
1689 if (!sg)
1690 goto done;
1691
1692 offset = 4 - remaining;
1693 buf = sg_virt(sg);
1694 memcpy(buf, (u8 *)&value + remaining, offset);
1695 nbytes += offset;
1696 }
1697
03fc9a7f 1698 status = atmci_readl(host, ATMCI_SR);
7d2be074 1699 if (status & ATMCI_DATA_ERROR_FLAGS) {
03fc9a7f 1700 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
7d2be074
HS
1701 | ATMCI_DATA_ERROR_FLAGS));
1702 host->data_status = status;
965ebf33 1703 data->bytes_xfered += nbytes;
965ebf33 1704 return;
7d2be074 1705 }
2c96a293 1706 } while (status & ATMCI_RXRDY);
7d2be074
HS
1707
1708 host->pio_offset = offset;
1709 data->bytes_xfered += nbytes;
1710
1711 return;
1712
1713done:
03fc9a7f
LD
1714 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1715 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
7d2be074 1716 data->bytes_xfered += nbytes;
965ebf33 1717 smp_wmb();
c06ad258 1718 atmci_set_pending(host, EVENT_XFER_COMPLETE);
7d2be074
HS
1719}
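
/*
 * Note (added for clarity): the PIO read helper above and the write helper
 * below move one 32-bit word of the data FIFO at a time; the "remaining"
 * branch handles a word that straddles a scatterlist entry boundary by
 * splitting it between the old and the new entry.
 */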
1720
1721static void atmci_write_data_pio(struct atmel_mci *host)
1722{
1723 struct scatterlist *sg = host->sg;
1724 void *buf = sg_virt(sg);
1725 unsigned int offset = host->pio_offset;
1726 struct mmc_data *data = host->data;
1727 u32 value;
1728 u32 status;
1729 unsigned int nbytes = 0;
1730
1731 do {
1732 if (likely(offset + 4 <= sg->length)) {
1733 value = get_unaligned((u32 *)(buf + offset));
03fc9a7f 1734 atmci_writel(host, ATMCI_TDR, value);
7d2be074
HS
1735
1736 offset += 4;
1737 nbytes += 4;
1738 if (offset == sg->length) {
1739 host->sg = sg = sg_next(sg);
1740 if (!sg)
1741 goto done;
1742
1743 offset = 0;
1744 buf = sg_virt(sg);
1745 }
1746 } else {
1747 unsigned int remaining = sg->length - offset;
1748
1749 value = 0;
1750 memcpy(&value, buf + offset, remaining);
1751 nbytes += remaining;
1752
1753 host->sg = sg = sg_next(sg);
1754 if (!sg) {
03fc9a7f 1755 atmci_writel(host, ATMCI_TDR, value);
7d2be074
HS
1756 goto done;
1757 }
1758
1759 offset = 4 - remaining;
1760 buf = sg_virt(sg);
1761 memcpy((u8 *)&value + remaining, buf, offset);
03fc9a7f 1762 atmci_writel(host, ATMCI_TDR, value);
7d2be074
HS
1763 nbytes += offset;
1764 }
1765
03fc9a7f 1766 status = atmci_readl(host, ATMCI_SR);
7d2be074 1767 if (status & ATMCI_DATA_ERROR_FLAGS) {
03fc9a7f 1768 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
7d2be074
HS
1769 | ATMCI_DATA_ERROR_FLAGS));
1770 host->data_status = status;
965ebf33 1771 data->bytes_xfered += nbytes;
965ebf33 1772 return;
7d2be074 1773 }
2c96a293 1774 } while (status & ATMCI_TXRDY);
7d2be074
HS
1775
1776 host->pio_offset = offset;
1777 data->bytes_xfered += nbytes;
1778
1779 return;
1780
1781done:
03fc9a7f
LD
1782 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1783 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
7d2be074 1784 data->bytes_xfered += nbytes;
965ebf33 1785 smp_wmb();
c06ad258 1786 atmci_set_pending(host, EVENT_XFER_COMPLETE);
7d2be074
HS
1787}
1788
88ff82ed
AG
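/*
 * Signal a pending SDIO interrupt to the mmc core for every slot whose
 * SDIO IRQ bit is set in the status word.
 */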
1789static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1790{
1791 int i;
1792
2c96a293 1793 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
88ff82ed
AG
1794 struct atmel_mci_slot *slot = host->slot[i];
1795 if (slot && (status & slot->sdio_irq)) {
1796 mmc_signal_sdio_irq(slot->mmc);
1797 }
1798 }
1799}
1800
1801
7d2be074
HS
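/*
 * Main interrupt handler: loop over SR & IMR (at most five passes),
 * mask the sources that have been handled and defer the request state
 * machine work to the tasklet. PDC buffer chaining (ENDRX/ENDTX,
 * RXBUFF/TXBUFE) is handled directly here.
 */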
1802static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1803{
965ebf33 1804 struct atmel_mci *host = dev_id;
7d2be074
HS
1805 u32 status, mask, pending;
1806 unsigned int pass_count = 0;
1807
7d2be074 1808 do {
03fc9a7f
LD
1809 status = atmci_readl(host, ATMCI_SR);
1810 mask = atmci_readl(host, ATMCI_IMR);
7d2be074
HS
1811 pending = status & mask;
1812 if (!pending)
1813 break;
1814
1815 if (pending & ATMCI_DATA_ERROR_FLAGS) {
03fc9a7f 1816 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
f5177547
LD
1817 | ATMCI_RXRDY | ATMCI_TXRDY
1818 | ATMCI_ENDRX | ATMCI_ENDTX
1819 | ATMCI_RXBUFF | ATMCI_TXBUFE);
965ebf33 1820
7d2be074 1821 host->data_status = status;
965ebf33 1822 smp_wmb();
7d2be074
HS
1823 atmci_set_pending(host, EVENT_DATA_ERROR);
1824 tasklet_schedule(&host->tasklet);
1825 }
796211b7 1826
796211b7
LD
1827 if (pending & ATMCI_TXBUFE) {
1828 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
7e8ba228 1829 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
796211b7
LD
1830 /*
1832 * This interrupt can arrive before the second PDC buffer has been
1833 * configured, so both the first and second buffers must be
1834 * reconfigured here
1834 */
1835 if (host->data_size) {
1836 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
7e8ba228 1837 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
796211b7
LD
1838 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
1839 } else {
1840 atmci_pdc_complete(host);
1841 }
7e8ba228
LD
1842 } else if (pending & ATMCI_ENDTX) {
1843 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
796211b7
LD
1844
1845 if (host->data_size) {
1846 atmci_pdc_set_single_buf(host,
7e8ba228
LD
1847 XFER_TRANSMIT, PDC_SECOND_BUF);
1848 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
796211b7
LD
1849 }
1850 }
1851
1852 if (pending & ATMCI_RXBUFF) {
1853 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
7e8ba228 1854 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
796211b7
LD
1855 /*
1856 * This interrupt can arrive before the second PDC buffer has been
1857 * configured, so both the first and second buffers must be
1858 * reconfigured here
1859 */
1860 if (host->data_size) {
1861 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
7e8ba228 1862 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
796211b7
LD
1863 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
1864 } else {
1865 atmci_pdc_complete(host);
1866 }
7e8ba228
LD
1867 } else if (pending & ATMCI_ENDRX) {
1868 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1869
1870 if (host->data_size) {
1871 atmci_pdc_set_single_buf(host,
1872 XFER_RECEIVE, PDC_SECOND_BUF);
1873 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1874 }
796211b7
LD
1875 }
1876
f5177547
LD
1877 /*
1878 * Early MCI IP revisions, mainly the ones with PDC, have issues
1879 * with the not-busy signal: it is not asserted after a data
1880 * transfer unless a stop command has been sent. The appropriate
1881 * workaround is to use the BLKE signal instead.
1882 */
1883 if (pending & ATMCI_BLKE) {
1884 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1885 smp_wmb();
1886 atmci_set_pending(host, EVENT_NOTBUSY);
1887 tasklet_schedule(&host->tasklet);
1888 }
7e8ba228 1889
2c96a293 1890 if (pending & ATMCI_NOTBUSY) {
f5177547 1891 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
965ebf33 1892 smp_wmb();
f5177547 1893 atmci_set_pending(host, EVENT_NOTBUSY);
7d2be074
HS
1894 tasklet_schedule(&host->tasklet);
1895 }
f5177547 1896
2c96a293 1897 if (pending & ATMCI_RXRDY)
7d2be074 1898 atmci_read_data_pio(host);
2c96a293 1899 if (pending & ATMCI_TXRDY)
7d2be074
HS
1900 atmci_write_data_pio(host);
1901
f5177547
LD
1902 if (pending & ATMCI_CMDRDY) {
1903 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1904 host->cmd_status = status;
1905 smp_wmb();
1906 atmci_set_pending(host, EVENT_CMD_RDY);
1907 tasklet_schedule(&host->tasklet);
1908 }
88ff82ed 1909
2c96a293 1910 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
88ff82ed
AG
1911 atmci_sdio_interrupt(host, status);
1912
7d2be074
HS
1913 } while (pass_count++ < 5);
1914
7d2be074
HS
1915 return pass_count ? IRQ_HANDLED : IRQ_NONE;
1916}
1917
1918static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1919{
965ebf33 1920 struct atmel_mci_slot *slot = dev_id;
7d2be074
HS
1921
1922 /*
1923 * Disable interrupts until the pin has stabilized, then check
1924 * its state. Use mod_timer() since we may be in the
1925 * middle of the timer routine when this interrupt triggers.
1926 */
1927 disable_irq_nosync(irq);
965ebf33 1928 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
7d2be074
HS
1929
1930 return IRQ_HANDLED;
1931}
1932
965ebf33
HS
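/*
 * Set up one MMC slot: allocate the mmc_host, derive its capabilities
 * from the controller capabilities and the platform data, claim the
 * optional card-detect and write-protect GPIOs, and register the host.
 */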
1933static int __init atmci_init_slot(struct atmel_mci *host,
1934 struct mci_slot_pdata *slot_data, unsigned int id,
88ff82ed 1935 u32 sdc_reg, u32 sdio_irq)
965ebf33
HS
1936{
1937 struct mmc_host *mmc;
1938 struct atmel_mci_slot *slot;
1939
1940 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1941 if (!mmc)
1942 return -ENOMEM;
1943
1944 slot = mmc_priv(mmc);
1945 slot->mmc = mmc;
1946 slot->host = host;
1947 slot->detect_pin = slot_data->detect_pin;
1948 slot->wp_pin = slot_data->wp_pin;
1c1452be 1949 slot->detect_is_active_high = slot_data->detect_is_active_high;
965ebf33 1950 slot->sdc_reg = sdc_reg;
88ff82ed 1951 slot->sdio_irq = sdio_irq;
965ebf33
HS
1952
1953 mmc->ops = &atmci_ops;
1954 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1955 mmc->f_max = host->bus_hz / 2;
1956 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
88ff82ed
AG
1957 if (sdio_irq)
1958 mmc->caps |= MMC_CAP_SDIO_IRQ;
796211b7 1959 if (host->caps.has_highspeed)
99ddffd8 1960 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
7a90dcc2
LD
1961 /*
1962 * Without the read/write proof capability, it is strongly
1963 * recommended to use a 1-bit data bus only, to prevent FIFO
1964 * underruns and overruns which would corrupt the data.
1965 */
1966 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
965ebf33
HS
1967 mmc->caps |= MMC_CAP_4_BIT_DATA;
1968
7a90dcc2
LD
1969 if (atmci_get_version(host) < 0x200) {
1970 mmc->max_segs = 256;
1971 mmc->max_blk_size = 4095;
1972 mmc->max_blk_count = 256;
1973 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1974 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
1975 } else {
1976 mmc->max_segs = 64;
1977 mmc->max_req_size = 32768 * 512;
1978 mmc->max_blk_size = 32768;
1979 mmc->max_blk_count = 512;
1980 }
965ebf33
HS
1981
1982 /* Assume card is present initially */
1983 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1984 if (gpio_is_valid(slot->detect_pin)) {
1985 if (gpio_request(slot->detect_pin, "mmc_detect")) {
1986 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1987 slot->detect_pin = -EBUSY;
1c1452be
JL
1988 } else if (gpio_get_value(slot->detect_pin) ^
1989 slot->detect_is_active_high) {
965ebf33
HS
1990 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1991 }
1992 }
1993
1994 if (!gpio_is_valid(slot->detect_pin))
1995 mmc->caps |= MMC_CAP_NEEDS_POLL;
1996
1997 if (gpio_is_valid(slot->wp_pin)) {
1998 if (gpio_request(slot->wp_pin, "mmc_wp")) {
1999 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2000 slot->wp_pin = -EBUSY;
2001 }
2002 }
2003
2004 host->slot[id] = slot;
2005 mmc_add_host(mmc);
2006
2007 if (gpio_is_valid(slot->detect_pin)) {
2008 int ret;
2009
2010 setup_timer(&slot->detect_timer, atmci_detect_change,
2011 (unsigned long)slot);
2012
2013 ret = request_irq(gpio_to_irq(slot->detect_pin),
2014 atmci_detect_interrupt,
2015 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2016 "mmc-detect", slot);
2017 if (ret) {
2018 dev_dbg(&mmc->class_dev,
2019 "could not request IRQ %d for detect pin\n",
2020 gpio_to_irq(slot->detect_pin));
2021 gpio_free(slot->detect_pin);
2022 slot->detect_pin = -EBUSY;
2023 }
2024 }
2025
2026 atmci_init_debugfs(slot);
2027
2028 return 0;
2029}
2030
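/*
 * Tear down one slot: unregister the mmc_host and release the GPIOs
 * and detect IRQ claimed in atmci_init_slot().
 */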
2031static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2032 unsigned int id)
2033{
2034 /* Debugfs stuff is cleaned up by mmc core */
2035
2036 set_bit(ATMCI_SHUTDOWN, &slot->flags);
2037 smp_wmb();
2038
2039 mmc_remove_host(slot->mmc);
2040
2041 if (gpio_is_valid(slot->detect_pin)) {
2042 int pin = slot->detect_pin;
2043
2044 free_irq(gpio_to_irq(pin), slot);
2045 del_timer_sync(&slot->detect_timer);
2046 gpio_free(pin);
2047 }
2048 if (gpio_is_valid(slot->wp_pin))
2049 gpio_free(slot->wp_pin);
2050
2051 slot->host->slot[id] = NULL;
2052 mmc_free_host(slot->mmc);
2053}
2054
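/*
 * dmaengine channel filter: accept only the channel that matches the
 * DMA slave described in the platform data.
 */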
2c96a293 2055static bool atmci_filter(struct dma_chan *chan, void *slave)
74465b4f 2056{
2635d1ba 2057 struct mci_dma_data *sl = slave;
74465b4f 2058
2635d1ba
NF
2059 if (sl && find_slave_dev(sl) == chan->device->dev) {
2060 chan->private = slave_data_ptr(sl);
7dd60251 2061 return true;
2635d1ba 2062 } else {
7dd60251 2063 return false;
2635d1ba 2064 }
74465b4f 2065}
2635d1ba 2066
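/*
 * Try to grab a DMA channel matching the platform data and, on success,
 * set up the slave configuration used for both transfer directions.
 * Returns true if DMA can be used, false to fall back to PDC or PIO.
 */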
ef878198 2067static bool atmci_configure_dma(struct atmel_mci *host)
2635d1ba
NF
2068{
2069 struct mci_platform_data *pdata;
2070
2071 if (host == NULL)
ef878198 2072 return false;
2635d1ba
NF
2073
2074 pdata = host->pdev->dev.platform_data;
2075
2076 if (pdata && find_slave_dev(pdata->dma_slave)) {
2077 dma_cap_mask_t mask;
2078
2635d1ba
NF
2079 /* Try to grab a DMA channel */
2080 dma_cap_zero(mask);
2081 dma_cap_set(DMA_SLAVE, mask);
2082 host->dma.chan =
2c96a293 2083 dma_request_channel(mask, atmci_filter, pdata->dma_slave);
2635d1ba 2084 }
ef878198
LD
2085 if (!host->dma.chan) {
2086 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2087 return false;
2088 } else {
74791a2d 2089 dev_info(&host->pdev->dev,
b81cfc41 2090 "using %s for DMA transfers\n",
74791a2d 2091 dma_chan_name(host->dma.chan));
e2b35f3d
VK
2092
2093 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2094 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2095 host->dma_conf.src_maxburst = 1;
2096 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2097 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2098 host->dma_conf.dst_maxburst = 1;
2099 host->dma_conf.device_fc = false;
ef878198
LD
2100 return true;
2101 }
2635d1ba 2102}
796211b7 2103
796211b7
LD
2104/*
2105 * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
2106 * HSMCI provides DMA support and a new configuration register but no
2107 * longer supports PDC.
2108 */
2109static void __init atmci_get_cap(struct atmel_mci *host)
2110{
2111 unsigned int version;
2112
2113 version = atmci_get_version(host);
2114 dev_info(&host->pdev->dev,
2115 "version: 0x%x\n", version);
2116
2117 host->caps.has_dma = 0;
faf8180b 2118 host->caps.has_pdc = 1;
796211b7
LD
2119 host->caps.has_cfg_reg = 0;
2120 host->caps.has_cstor_reg = 0;
2121 host->caps.has_highspeed = 0;
2122 host->caps.has_rwproof = 0;
faf8180b 2123 host->caps.has_odd_clk_div = 0;
796211b7
LD
2124
2125 /* keep only major version number */
2126 switch (version & 0xf00) {
796211b7 2127 case 0x500:
faf8180b
LD
2128 host->caps.has_odd_clk_div = 1;
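		/* fall through */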
2129 case 0x400:
2130 case 0x300:
796211b7
LD
2131#ifdef CONFIG_AT_HDMAC
2132 host->caps.has_dma = 1;
2635d1ba 2133#else
796211b7
LD
2134 dev_info(&host->pdev->dev,
2135			"DMA capability present but no DMA engine selected, using PIO\n");
74465b4f 2136#endif
faf8180b 2137 host->caps.has_pdc = 0;
796211b7
LD
2138 host->caps.has_cfg_reg = 1;
2139 host->caps.has_cstor_reg = 1;
2140 host->caps.has_highspeed = 1;
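		/* fall through */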
faf8180b 2141 case 0x200:
796211b7 2142 host->caps.has_rwproof = 1;
faf8180b 2143 case 0x100:
796211b7
LD
2144 break;
2145 default:
faf8180b 2146 host->caps.has_pdc = 0;
796211b7
LD
2147 dev_warn(&host->pdev->dev,
2148 "Unmanaged mci version, set minimum capabilities\n");
2149 break;
2150 }
2151}
74465b4f 2152
7d2be074
HS
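/*
 * Probe: map the registers, read the bus clock rate, install the
 * interrupt handler, pick the transfer method (DMA, PDC or PIO) from
 * the detected capabilities, and register the slots described in the
 * platform data.
 */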
2153static int __init atmci_probe(struct platform_device *pdev)
2154{
2155 struct mci_platform_data *pdata;
965ebf33
HS
2156 struct atmel_mci *host;
2157 struct resource *regs;
2158 unsigned int nr_slots;
2159 int irq;
2160 int ret;
7d2be074
HS
2161
2162 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2163 if (!regs)
2164 return -ENXIO;
2165 pdata = pdev->dev.platform_data;
2166 if (!pdata)
2167 return -ENXIO;
2168 irq = platform_get_irq(pdev, 0);
2169 if (irq < 0)
2170 return irq;
2171
965ebf33
HS
2172 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2173 if (!host)
7d2be074
HS
2174 return -ENOMEM;
2175
7d2be074 2176 host->pdev = pdev;
965ebf33
HS
2177 spin_lock_init(&host->lock);
2178 INIT_LIST_HEAD(&host->queue);
7d2be074
HS
2179
2180 host->mck = clk_get(&pdev->dev, "mci_clk");
2181 if (IS_ERR(host->mck)) {
2182 ret = PTR_ERR(host->mck);
2183 goto err_clk_get;
2184 }
2185
2186 ret = -ENOMEM;
e8e3f6ca 2187 host->regs = ioremap(regs->start, resource_size(regs));
7d2be074
HS
2188 if (!host->regs)
2189 goto err_ioremap;
2190
2191 clk_enable(host->mck);
03fc9a7f 2192 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
7d2be074
HS
2193 host->bus_hz = clk_get_rate(host->mck);
2194 clk_disable(host->mck);
2195
2196 host->mapbase = regs->start;
2197
965ebf33 2198 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
7d2be074 2199
89c8aa20 2200 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
7d2be074
HS
2201 if (ret)
2202 goto err_request_irq;
2203
796211b7
LD
2204	/* Get MCI capabilities and set transfer operations accordingly */
2205 atmci_get_cap(host);
ef878198 2206 if (host->caps.has_dma && atmci_configure_dma(host)) {
796211b7
LD
2207 host->prepare_data = &atmci_prepare_data_dma;
2208 host->submit_data = &atmci_submit_data_dma;
2209 host->stop_transfer = &atmci_stop_transfer_dma;
2210 } else if (host->caps.has_pdc) {
2211 dev_info(&pdev->dev, "using PDC\n");
2212 host->prepare_data = &atmci_prepare_data_pdc;
2213 host->submit_data = &atmci_submit_data_pdc;
2214 host->stop_transfer = &atmci_stop_transfer_pdc;
2215 } else {
ef878198 2216 dev_info(&pdev->dev, "using PIO\n");
796211b7
LD
2217 host->prepare_data = &atmci_prepare_data;
2218 host->submit_data = &atmci_submit_data;
2219 host->stop_transfer = &atmci_stop_transfer;
2220 }
2221
7d2be074
HS
2222 platform_set_drvdata(pdev, host);
2223
965ebf33
HS
2224 /* We need at least one slot to succeed */
2225 nr_slots = 0;
2226 ret = -ENODEV;
2227 if (pdata->slot[0].bus_width) {
2228 ret = atmci_init_slot(host, &pdata->slot[0],
2c96a293 2229 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
7a90dcc2 2230 if (!ret) {
965ebf33 2231 nr_slots++;
7a90dcc2
LD
2232 host->buf_size = host->slot[0]->mmc->max_req_size;
2233 }
965ebf33
HS
2234 }
2235 if (pdata->slot[1].bus_width) {
2236 ret = atmci_init_slot(host, &pdata->slot[1],
2c96a293 2237 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
7a90dcc2 2238 if (!ret) {
965ebf33 2239 nr_slots++;
7a90dcc2
LD
2240 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2241 host->buf_size =
2242 host->slot[1]->mmc->max_req_size;
2243 }
7d2be074
HS
2244 }
2245
04d699c3
RE
2246 if (!nr_slots) {
2247 dev_err(&pdev->dev, "init failed: no slot defined\n");
965ebf33 2248 goto err_init_slot;
04d699c3 2249 }
7d2be074 2250
7a90dcc2
LD
2251 if (!host->caps.has_rwproof) {
2252 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2253 &host->buf_phys_addr,
2254 GFP_KERNEL);
2255 if (!host->buffer) {
2256 ret = -ENOMEM;
2257 dev_err(&pdev->dev, "buffer allocation failed\n");
2258 goto err_init_slot;
2259 }
2260 }
2261
965ebf33
HS
2262 dev_info(&pdev->dev,
2263 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2264 host->mapbase, irq, nr_slots);
deec9ae3 2265
7d2be074
HS
2266 return 0;
2267
965ebf33 2268err_init_slot:
74465b4f
DW
2269 if (host->dma.chan)
2270 dma_release_channel(host->dma.chan);
965ebf33 2271 free_irq(irq, host);
7d2be074
HS
2272err_request_irq:
2273 iounmap(host->regs);
2274err_ioremap:
2275 clk_put(host->mck);
2276err_clk_get:
965ebf33 2277 kfree(host);
7d2be074
HS
2278 return ret;
2279}
2280
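/*
 * Remove: free the bounce buffer if one was allocated, clean up the
 * slots, disable the controller, then release the DMA channel, the
 * IRQ, the register mapping and the clock.
 */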
2281static int __exit atmci_remove(struct platform_device *pdev)
2282{
965ebf33
HS
2283 struct atmel_mci *host = platform_get_drvdata(pdev);
2284 unsigned int i;
7d2be074
HS
2285
2286 platform_set_drvdata(pdev, NULL);
2287
7a90dcc2
LD
2288 if (host->buffer)
2289 dma_free_coherent(&pdev->dev, host->buf_size,
2290 host->buffer, host->buf_phys_addr);
2291
2c96a293 2292 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
965ebf33
HS
2293 if (host->slot[i])
2294 atmci_cleanup_slot(host->slot[i], i);
2295 }
7d2be074 2296
965ebf33 2297 clk_enable(host->mck);
03fc9a7f
LD
2298 atmci_writel(host, ATMCI_IDR, ~0UL);
2299 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2300 atmci_readl(host, ATMCI_SR);
965ebf33 2301 clk_disable(host->mck);
7d2be074 2302
65e8b083 2303#ifdef CONFIG_MMC_ATMELMCI_DMA
74465b4f
DW
2304 if (host->dma.chan)
2305 dma_release_channel(host->dma.chan);
65e8b083
HS
2306#endif
2307
965ebf33
HS
2308 free_irq(platform_get_irq(pdev, 0), host);
2309 iounmap(host->regs);
7d2be074 2310
965ebf33
HS
2311 clk_put(host->mck);
2312 kfree(host);
7d2be074 2313
7d2be074
HS
2314 return 0;
2315}
2316
5c2f2b9b
NF
2317#ifdef CONFIG_PM
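/*
 * Suspend every registered slot; if one fails, resume the slots that
 * were already suspended and propagate the error.
 */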
2318static int atmci_suspend(struct device *dev)
2319{
2320 struct atmel_mci *host = dev_get_drvdata(dev);
2321 int i;
2322
2c96a293 2323 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
5c2f2b9b
NF
2324 struct atmel_mci_slot *slot = host->slot[i];
2325 int ret;
2326
2327 if (!slot)
2328 continue;
2329 ret = mmc_suspend_host(slot->mmc);
2330 if (ret < 0) {
2331 while (--i >= 0) {
2332 slot = host->slot[i];
2333 if (slot
2334 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
2335 mmc_resume_host(host->slot[i]->mmc);
2336 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2337 }
2338 }
2339 return ret;
2340 } else {
2341 set_bit(ATMCI_SUSPENDED, &slot->flags);
2342 }
2343 }
2344
2345 return 0;
2346}
2347
2348static int atmci_resume(struct device *dev)
2349{
2350 struct atmel_mci *host = dev_get_drvdata(dev);
2351 int i;
2352 int ret = 0;
2353
2c96a293 2354 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
5c2f2b9b
NF
2355 struct atmel_mci_slot *slot = host->slot[i];
2356 int err;
2357
2359 if (!slot)
2360 continue;
2361 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2362 continue;
2363 err = mmc_resume_host(slot->mmc);
2364 if (err < 0)
2365 ret = err;
2366 else
2367 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2368 }
2369
2370 return ret;
2371}
2372static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2373#define ATMCI_PM_OPS (&atmci_pm)
2374#else
2375#define ATMCI_PM_OPS NULL
2376#endif
2377
7d2be074
HS
2378static struct platform_driver atmci_driver = {
2379 .remove = __exit_p(atmci_remove),
2380 .driver = {
2381 .name = "atmel_mci",
5c2f2b9b 2382 .pm = ATMCI_PM_OPS,
7d2be074
HS
2383 },
2384};
2385
2386static int __init atmci_init(void)
2387{
2388 return platform_driver_probe(&atmci_driver, atmci_probe);
2389}
2390
2391static void __exit atmci_exit(void)
2392{
2393 platform_driver_unregister(&atmci_driver);
2394}
2395
74465b4f 2396late_initcall(atmci_init); /* try to load after dma driver when built-in */
7d2be074
HS
2397module_exit(atmci_exit);
2398
2399MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
e05503ef 2400MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
7d2be074 2401MODULE_LICENSE("GPL v2");