/*
 * mmc: at91_mci: fix timeout errors
 * [deliverable/linux.git] / drivers / mmc / host / at91_mci.c
 */
1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/gpio.h>
74
75 #include <mach/board.h>
76 #include <mach/cpu.h>
77 #include <mach/at91_mci.h>
78
#define DRIVER_NAME "at91_mci"

/* host->flags bits: which commands of the current request went out */
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

/* All controller error status bits this driver reacts to */
#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

/* MMIO accessors for the MCI register block */
#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
90
91
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* MMC core host we are registered with */
	struct mmc_command *cmd;	/* command currently in flight */
	struct mmc_request *request;	/* request currently being processed */

	void __iomem *baseaddr;		/* mapped MCI register block */
	int irq;			/* MCI peripheral interrupt */

	struct at91_mmc_data *board;	/* board data: GPIO pins, slot, wire4 */
	int present;			/* card present (0/1), -1 if no detect pin */

	struct clk *mci_clk;		/* peripheral clock */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;

	/* Timer for timeouts */
	struct timer_list timer;
};
131
/*
 * Reset the controller and restore most of the state
 *
 * Used both as the at91rm9200 post-transfer workaround and as the
 * recovery path after a request timeout.  Runs with local interrupts
 * off so the saved register state cannot change underneath us.
 */
static void at91_reset_host(struct at91mci_host *host)
{
	unsigned long flags;
	u32 mr;
	u32 sdcr;
	u32 dtor;
	u32 imr;

	local_irq_save(flags);
	/* remember which interrupt sources were enabled */
	imr = at91_mci_read(host, AT91_MCI_IMR);

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	/* save current state */
	/* NOTE(review): 0x7fff keeps only MR's low fields — presumably the
	 * divider/latency bits; confirm mask against the MCI datasheet */
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
	sdcr = at91_mci_read(host, AT91_MCI_SDCR);
	dtor = at91_mci_read(host, AT91_MCI_DTOR);

	/* reset the controller */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);

	/* restore state */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_MR, mr);
	at91_mci_write(host, AT91_MCI_SDCR, sdcr);
	at91_mci_write(host, AT91_MCI_DTOR, dtor);
	at91_mci_write(host, AT91_MCI_IER, imr);

	/* make sure sdio interrupts will fire */
	at91_mci_read(host, AT91_MCI_SR);

	local_irq_restore(flags);
}
168
169 static void at91_timeout_timer(unsigned long data)
170 {
171 struct at91mci_host *host;
172
173 host = (struct at91mci_host *)data;
174
175 if (host->request) {
176 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
177
178 if (host->cmd && host->cmd->data) {
179 host->cmd->data->error = -ETIMEDOUT;
180 } else {
181 if (host->cmd)
182 host->cmd->error = -ETIMEDOUT;
183 else
184 host->request->cmd->error = -ETIMEDOUT;
185 }
186
187 at91_reset_host(host);
188 mmc_request_done(host->mmc, host->request);
189 }
190 }
191
/*
 * Copy from sg to a dma block - used for transfers
 *
 * Gathers the write data from the scatterlist into host->buffer, the
 * single contiguous DMA bounce buffer, byte-swapping each word on
 * at91rm9200 to compensate for that controller's erratum.
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = data->blksz * data->blocks;
	len = data->sg_len;

	/* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		if (host->total_length == 12)
			memset(dmabuf, 0, 12);

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			/* copy word by word, swapping each word's byte order */
			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;
			memcpy(tmpv, sgbuffer, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		/* kunmap wants the page start, not the offset within it */
		kunmap_atomic(((void *)sgbuffer) - sg->offset, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
248
/*
 * Prepare a dma read
 *
 * Loads up to two scatterlist entries into the PDC receive registers
 * (the "current" and "next" pointer/counter pairs) so reception can
 * proceed back-to-back without software intervention.
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 fills the current PDC slot, i == 1 the "next" slot */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		/* PDC counter is in words unless the block size forces byte mode */
		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
316
/*
 * Handle after a dma read
 *
 * Unmaps every scatterlist entry whose transfer completed, undoes the
 * at91rm9200 byte-swap erratum in place, accounts the received bytes,
 * then either queues the next chunk or switches to waiting for the
 * final RXBUFF interrupt.
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		/* DMA wrote behind the cache's back */
		flush_dcache_page(sg_page(sg));

		data->bytes_xfered += sg->length;
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		/* everything queued: stop watching ENDRX, wait for final RXBUFF */
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
379
380 /*
381 * Handle transmitted data
382 */
383 static void at91_mci_handle_transmitted(struct at91mci_host *host)
384 {
385 struct mmc_command *cmd;
386 struct mmc_data *data;
387
388 pr_debug("Handling the transmit\n");
389
390 /* Disable the transfer */
391 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
392
393 /* Now wait for cmd ready */
394 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
395
396 cmd = host->cmd;
397 if (!cmd) return;
398
399 data = cmd->data;
400 if (!data) return;
401
402 if (cmd->data->blocks > 1) {
403 pr_debug("multiple write : wait for BLKE...\n");
404 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
405 } else
406 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
407 }
408
409 /*
410 * Update bytes tranfered count during a write operation
411 */
412 static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
413 {
414 struct mmc_data *data;
415
416 /* always deal with the effective request (and not the current cmd) */
417
418 if (host->request->cmd && host->request->cmd->error != 0)
419 return;
420
421 if (host->request->data) {
422 data = host->request->data;
423 if (data->flags & MMC_DATA_WRITE) {
424 /* card is in IDLE mode now */
425 pr_debug("-> bytes_xfered %d, total_length = %d\n",
426 data->bytes_xfered, host->total_length);
427 data->bytes_xfered = data->blksz * data->blocks;
428 }
429 }
430 }
431
432
/*
 * Handle after command sent ready
 *
 * Returns 1 when the command is fully complete (caller then finishes
 * the request) and 0 when further interrupts must be awaited first.
 */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi-block write we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending the multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}
452
453
/*
 * Enable the controller
 *
 * Brings the MCI block up with all interrupts masked, a 1M-cycle data
 * timeout, and PDC mode selected.
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x34a packs initial MR divider/latency fields —
	 * presumably CLKDIV plus PWSDIV; confirm against the MCI datasheet */
	mr = AT91_MCI_PDCMODE | 0x34a;

	/* these cores have hardware read/write proof against under/overruns */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
474
/*
 * Disable the controller
 *
 * Also issues a software reset, leaving the block in a clean state.
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
482
/*
 * Send a command
 *
 * Programs the controller for the command and its optional data phase,
 * sets up the PDC for DMA, writes the command register and finally
 * enables the interrupts that will signal completion.  On a fatal
 * setup problem the request is completed immediately with an error.
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		/* NOTE(review): unbounded busy-wait — relies on the controller
		 * always completing this dummy open-drain command */
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latancy */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		/* these controllers cannot do byte-granular or stream transfers */
		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
			if (data->blksz & 0x3) {
				pr_debug("Unsupported block size\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
			if (data->flags & MMC_DATA_STREAM) {
				pr_debug("Stream commands not supported\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: quiesce the PDC and clear its pointers */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		/* newer controllers take the block count via BLKR instead */
		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
			at91_mci_write(host, AT91_MCI_BLKR,
				AT91_MCI_BLKR_BCNT(blocks) |
				AT91_MCI_BLKR_BLKLEN(block_length));

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				/*
				 * AT91SAM926[0/3] Data Write Operation and
				 * number of bytes erratum
				 */
				if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
					if (host->total_length < 12)
						host->total_length = 12;

				/* bounce buffer; released in at91_mci_completed_command() */
				host->buffer = kmalloc(host->total_length, GFP_KERNEL);
				if (!host->buffer) {
					pr_debug("Can't alloc tx buffer\n");
					cmd->error = -ENOMEM;
					mmc_request_done(host->mmc, host->request);
					return;
				}

				at91_mci_sg_to_dma(host, data);

				host->physical_address = dma_map_single(NULL,
						host->buffer, host->total_length,
						DMA_TO_DEVICE);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
667
/*
 * Process the next step in the request
 *
 * Small state machine driven by host->flags: first send the command,
 * then the stop command (if any), then complete the request.
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	} else {
		/* request finished: cancel the watchdog armed in at91_mci_request() */
		del_timer(&host->timer);
		/* the at91rm9200 mci controller hangs after some transfers,
		 * and the workaround is to reset it after each transfer.
		 */
		if (cpu_is_at91rm9200())
			at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}
690
/*
 * Handle a command that has been completed
 *
 * Reads back the response registers, releases the TX bounce buffer if
 * one was used, maps controller status bits to errno values, and kicks
 * the request state machine forward.
 */
static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;

	/* mask everything except the SDIO interrupts, which must stay live */
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_unmap_single(NULL,
				host->physical_address, host->total_length,
				DMA_TO_DEVICE);
		kfree(host->buffer);
		host->buffer = NULL;
	}

	pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
		 status, at91_mci_read(host, AT91_MCI_SR),
		 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
		/* a response CRC error is expected for commands without a CRC */
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
				if (data) {
					if (status & AT91_MCI_DTOE)
						data->error = -ETIMEDOUT;
					else if (status & AT91_MCI_DCRCE)
						data->error = -EILSEQ;
				}
			} else {
				if (status & AT91_MCI_RTOE)
					cmd->error = -ETIMEDOUT;
				else if (status & AT91_MCI_RCRCE)
					cmd->error = -EILSEQ;
				else
					cmd->error = -EIO;
			}

			pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
				cmd->error, data ? data->error : 0,
				cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}
749
/*
 * Handle an MMC request
 *
 * Entry point from the MMC core.  Arms a 2 s watchdog (serviced by
 * at91_timeout_timer) and starts the request state machine.
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	/* more than 1s timeout needed with slow SD cards */
	mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));

	at91_mci_process_next(host);
}
764
/*
 * Set the IOS
 *
 * Applies bus mode, clock rate (via the CLKDIV field of MR), bus
 * width and — when the board provides a vcc GPIO — card power.
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* MCI clock = master / (2 * (CLKDIV + 1)); round so the
		 * resulting rate never exceeds the requested one */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			gpio_set_value(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
			gpio_set_value(host->board->vcc_pin, 1);
			break;
		case MMC_POWER_ON:
			/* nothing to do: already powered */
			break;
		default:
			WARN_ON(1);
		}
	}
}
821
/*
 * Handle an interrupt
 *
 * Dispatches every active MCI status bit: errors complete the command
 * immediately; otherwise the DMA/busy helpers run and decide whether
 * the command is done.  SDIO interrupts are forwarded to the MMC core
 * and always remain enabled.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	/* only react to the sources we actually enabled */
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_update_bytes_xfered(host);
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			if (host->request->data && host->request->data->blocks > 1) {
				/* multi block write : complete multi write
				 * command and send stop */
				completed = 1;
			} else {
				at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
			}
		}

		if (int_status & AT91_MCI_SDIOIRQA)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_SDIOIRQB)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything except the SDIO interrupts before finishing */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
		at91_mci_completed_command(host, int_status);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	return IRQ_HANDLED;
}
930
931 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
932 {
933 struct at91mci_host *host = _host;
934 int present = !gpio_get_value(irq_to_gpio(irq));
935
936 /*
937 * we expect this irq on both insert and remove,
938 * and use a short delay to debounce.
939 */
940 if (present != host->present) {
941 host->present = present;
942 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
943 present ? "insert" : "remove");
944 if (!present) {
945 pr_debug("****** Resetting SD-card bus width ******\n");
946 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
947 }
948 /* 0.5s needed because of early card detect switch firing */
949 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
950 }
951 return IRQ_HANDLED;
952 }
953
954 static int at91_mci_get_ro(struct mmc_host *mmc)
955 {
956 struct at91mci_host *host = mmc_priv(mmc);
957
958 if (host->board->wp_pin)
959 return !!gpio_get_value(host->board->wp_pin);
960 /*
961 * Board doesn't support read only detection; let the mmc core
962 * decide what to do.
963 */
964 return -ENOSYS;
965 }
966
967 static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
968 {
969 struct at91mci_host *host = mmc_priv(mmc);
970
971 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
972 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
973 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
974 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
975
976 }
977
/* Operations exported to the MMC core for this host controller */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
	.enable_sdio_irq = at91_mci_enable_sdio_irq,
};
984
/*
 * Probe for the device
 *
 * Allocates the MMC host, claims the board GPIOs (detect, write
 * protect, vcc), clock, MMIO region and IRQs, resets the controller
 * and registers with the MMC core.  Error paths unwind in reverse
 * order through the failN labels.
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_SDIO_IRQ;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		/* only the sam9260/9263 variants get 4-bit mode here */
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	/* request watchdog; armed per-request in at91_mci_request() */
	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}
1146
/*
 * Remove a device
 *
 * Tears down in reverse of probe: detect IRQ and wakeup, controller,
 * timer, MMC registration, MCI IRQ, clock, GPIOs, MMIO mapping and
 * the reserved memory region.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	del_timer_sync(&host->timer);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);			/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
1191
1192 #ifdef CONFIG_PM
1193 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1194 {
1195 struct mmc_host *mmc = platform_get_drvdata(pdev);
1196 struct at91mci_host *host = mmc_priv(mmc);
1197 int ret = 0;
1198
1199 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1200 enable_irq_wake(host->board->det_pin);
1201
1202 if (mmc)
1203 ret = mmc_suspend_host(mmc, state);
1204
1205 return ret;
1206 }
1207
1208 static int at91_mci_resume(struct platform_device *pdev)
1209 {
1210 struct mmc_host *mmc = platform_get_drvdata(pdev);
1211 struct at91mci_host *host = mmc_priv(mmc);
1212 int ret = 0;
1213
1214 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1215 disable_irq_wake(host->board->det_pin);
1216
1217 if (mmc)
1218 ret = mmc_resume_host(mmc);
1219
1220 return ret;
1221 }
1222 #else
1223 #define at91_mci_suspend NULL
1224 #define at91_mci_resume NULL
1225 #endif
1226
/*
 * Driver registration.  Note: .probe is intentionally absent — the
 * driver is registered with platform_driver_probe(), which takes the
 * (__init) probe function directly.
 */
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1236
/* Register the platform driver; probe is bound at registration time */
static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

/* Unregister the platform driver */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);
1249
1250 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1251 MODULE_AUTHOR("Nick Randell");
1252 MODULE_LICENSE("GPL");
1253 MODULE_ALIAS("platform:at91_mci");
/* This page took 0.075538 seconds and 5 git commands to generate. */