mmc: at91_mci: fix hanging and rework to match flowcharts
[deliverable/linux.git] / drivers / mmc / host / at91_mci.c
CommitLineData
65dbf343 1/*
99eeb8df 2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
65dbf343
AV
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
99eeb8df 14 This is the AT91 MCI driver that has been tested with both MMC cards
65dbf343
AV
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
 32Commands are straightforward. The command is submitted to the controller and
 33 the request function returns. When the controller generates an interrupt to indicate
 34 the command is finished, the response to the command is read and the mmc_request_done
 35 function is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
99eeb8df
AV
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
65dbf343
AV
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
65dbf343
AV
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
93a3ddc2 67#include <linux/atmel_pdc.h>
65dbf343
AV
68
69#include <linux/mmc/host.h>
65dbf343
AV
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
99eeb8df 75#include <asm/arch/cpu.h>
65dbf343 76#include <asm/arch/gpio.h>
55d8baee 77#include <asm/arch/at91_mci.h>
65dbf343
AV
78
#define DRIVER_NAME "at91_mci"

/* host->flags bits: which parts of the current mmc_request have been issued */
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

/* Every error bit in the MCI status register, handled as one mask */
#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

/* Register accessors, offset from the ioremapped controller base */
#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
65dbf343
AV
91
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;	/* command currently in flight (NULL when idle) */
	struct mmc_request *request;	/* request being advanced across interrupts */

	void __iomem *baseaddr;		/* ioremapped MCI register window */
	int irq;			/* MCI interrupt line */

	struct at91_mmc_data *board;	/* board/platform data: pins, slot, wiring */
	int present;			/* card present flag; -1 when no detect pin */

	struct clk *mci_clk;		/* peripheral clock for the controller */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;		/* coherent bounce buffer for writes */
	dma_addr_t physical_address;	/* bus address of that buffer */
	unsigned int total_length;	/* size of the bounce buffer in bytes */

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
128
129/*
130 * Copy from sg to a dma block - used for transfers
131 */
e8d04d3d 132static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
65dbf343
AV
133{
134 unsigned int len, i, size;
135 unsigned *dmabuf = host->buffer;
136
137 size = host->total_length;
138 len = data->sg_len;
139
140 /*
141 * Just loop through all entries. Size might not
142 * be the entire list though so make sure that
143 * we do not transfer too much.
144 */
145 for (i = 0; i < len; i++) {
146 struct scatterlist *sg;
147 int amount;
65dbf343
AV
148 unsigned int *sgbuffer;
149
150 sg = &data->sg[i];
151
152 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
153 amount = min(size, sg->length);
154 size -= amount;
65dbf343 155
99eeb8df
AV
156 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
157 int index;
158
159 for (index = 0; index < (amount / 4); index++)
160 *dmabuf++ = swab32(sgbuffer[index]);
161 }
162 else
163 memcpy(dmabuf, sgbuffer, amount);
65dbf343
AV
164
165 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
166
167 if (size == 0)
168 break;
169 }
170
171 /*
172 * Check that we didn't get a request to transfer
173 * more data than can fit into the SG list.
174 */
175 BUG_ON(size != 0);
176}
177
/*
 * Prepare a dma read
 *
 * Loads the PDC receive pointer/counter registers with up to two
 * scatterlist entries (the "current" and "next" PDC slots), DMA-mapping
 * each page first.  Re-invoked from the ENDRX path until the whole
 * scatterlist has been queued.
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 fills the current PDC slot, i == 1 the next slot */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling: a non-zero counter means
		 * that slot still has an active transfer */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		/* PDC counters are in 32-bit words, hence length / 4;
		 * assumes sg->length is a multiple of 4 */
		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
245
/*
 * Handle after a dma read
 *
 * For every scatterlist entry whose PDC transfer has completed: unmap
 * it, account the bytes, and on the AT91RM9200 byte-swap the data in
 * place (controller erratum).  Then either queue the next PDC transfer
 * or switch the interrupt mask from ENDRX to RXBUFF to finish the read.
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		/* everything queued: stop watching ENDRX, wait for final RXBUFF */
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
308
/*
 * Handle transmitted data
 *
 * Runs on TXBUFE (the PDC has drained the bounce buffer): disable the
 * PDC, then wait for the card to finish - BLKE for multi-block writes,
 * NOTBUSY otherwise.
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->flags & MMC_DATA_MULTI) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	data->bytes_xfered = host->total_length;
}
339
ed99c541
NF
/*
 * Handle after command sent ready (CMDRDY).
 *
 * Returns 1 when the request step is complete, 0 when further
 * interrupts (NOTBUSY / TXBUFE / BLKE) must be awaited first.
 */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After multi block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending a write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}
360
361
65dbf343
AV
/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);		/* mask all interrupts */
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x34a is an undocumented magic MR value here -
	 * presumably clock-divider/timeout defaults; confirm against the
	 * AT91 MCI datasheet before changing */
	mr = AT91_MCI_PDCMODE | 0x34a;

	/* read/write proof (flow control) bits only exist on SAM926x parts */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
382
/*
 * Disable the controller (software reset leaves it in a clean state)
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
390
/*
 * Send a command
 *
 * Builds the CMDR value from the mmc_command, programs block length and
 * PDC pointers for any data phase, writes ARGR/CMDR, and finally
 * enables the interrupts that drive the rest of the transfer.
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1: issue a dummy
	 * open-drain command and busy-wait for CMDRDY */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: park the PDC and clear all its pointers */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode, then set the new block length */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
		at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read: queue PDC transfers straight
				 * into the scatterlist pages
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write: bounce the scatterlist into a
				 * single coherent buffer and point the PDC at it
				 */
				host->total_length = block_length * blocks;
				host->buffer = dma_alloc_coherent(NULL,
						host->total_length,
						&host->physical_address, GFP_KERNEL);

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
533
534/*
535 * Process the next step in the request
536 */
e8d04d3d 537static void at91_mci_process_next(struct at91mci_host *host)
65dbf343
AV
538{
539 if (!(host->flags & FL_SENT_COMMAND)) {
540 host->flags |= FL_SENT_COMMAND;
ed99c541 541 at91_mci_send_command(host, host->request->cmd);
65dbf343
AV
542 }
543 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
544 host->flags |= FL_SENT_STOP;
ed99c541 545 at91_mci_send_command(host, host->request->stop);
65dbf343
AV
546 }
547 else
548 mmc_request_done(host->mmc, host->request);
549}
550
/*
 * Handle a command that has been completed
 *
 * Reads back the response registers, frees the write bounce buffer if
 * one was allocated, translates controller error status into an MMC
 * error code, then advances to the next step of the request.
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		/* a response CRC error is expected (and ignored) when the
		 * response type carries no CRC */
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91_mci_process_next(host);
}
601
602/*
603 * Handle an MMC request
604 */
605static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
606{
607 struct at91mci_host *host = mmc_priv(mmc);
608 host->request = mrq;
609 host->flags = 0;
610
e8d04d3d 611 at91_mci_process_next(host);
65dbf343
AV
612}
613
/*
 * Set the IOS
 *
 * Applies the clock rate, bus width, and (when a vcc pin exists) card
 * power requested by the MMC core.  The divider is rounded so the
 * resulting clock never exceeds the requested rate.
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* MCI clock = master clock / (2 * (CLKDIV + 1)) */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			 at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_value(host->board->vcc_pin, 1);
				break;
		}
	}
}
667
/*
 * Handle an interrupt
 *
 * Only status bits currently enabled in IMR are processed.  Any error
 * bit completes the command immediately; otherwise the progress bits
 * (TXBUFE, ENDRX, RXBUFF, NOTBUSY, BLKE, CMDRDY) advance the transfer
 * state machine described in the comment at the top of the file.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			/* final receive buffer complete: stop the PDC */
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything; send_command re-enables what it needs */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
763
/*
 * Card-detect interrupt handler - fires on both insert and remove.
 *
 * NOTE(review): at91_get_gpio_value() is called with the irq number;
 * this presumably relies on the AT91 convention that a GPIO's irq
 * number equals its pin number - confirm against the platform code.
 */
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
785
a26b498c 786static int at91_mci_get_ro(struct mmc_host *mmc)
65dbf343
AV
787{
788 int read_only = 0;
789 struct at91mci_host *host = mmc_priv(mmc);
790
791 if (host->board->wp_pin) {
792 read_only = at91_get_gpio_value(host->board->wp_pin);
793 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
794 (read_only ? "read-only" : "read-write") );
795 }
796 else {
797 printk(KERN_WARNING "%s: host does not support reading read-only "
798 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
799 }
800 return read_only;
801}
802
/* Host operations the MMC core uses to drive this controller */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
808
/*
 * Probe for the device
 *
 * Claims the MMIO region, allocates the mmc_host, acquires and enables
 * the peripheral clock, resets the controller, and wires up the MCI
 * (and optional card-detect) interrupts.  Each failure path releases
 * everything acquired so far, in reverse order.
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	pr_debug("Probe MCI devices\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_BYTEBLOCK;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		/* only the SAM926x parts support 4-wire mode here */
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			printk("AT91 MMC: 4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENODEV;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
		clk_disable(host->mci_clk);
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		iounmap(host->baseaddr);
		release_mem_region(res->start, res->end - res->start + 1);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;	/* no detect pin: card state unknown */

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
				0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}
927
/*
 * Remove a device
 *
 * Tears down in reverse order of probe: detect irq, controller, MMC
 * host registration, MCI irq, clock, mapping, memory region.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* host->present stays -1 when no detect pin/irq was set up in probe */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);			/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
964
#ifdef CONFIG_PM
/* Forward a suspend request to the MMC core; no controller state saved. */
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_suspend_host(mmc, state);
}

/* Forward a resume request to the MMC core. */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_resume_host(mmc);
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
991
/* Platform driver glue; .probe is supplied via platform_driver_probe()
 * below so the probe routine can live in the __init section. */
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
This page took 0.200269 seconds and 5 git commands to generate.