1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver; it has been tested with both MMC and SD
15 cards. Boards with a write-protect pin are supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 that the command has finished, the response is read and mmc_request_done() is
35 called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x controllers are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are first copied from the
47 scatterlist into a DMA bounce buffer (in case the source buffer is read-only). The
48 entire write is then performed from this single DMA buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
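/*
 * For reference, a board file hooks this driver up through platform data
 * (struct at91_mmc_data, whose fields are used throughout this file).  A
 * minimal sketch, assuming the usual at91_add_device_mmc() helper and
 * example GPIO numbers taken from a typical AT91 board file:
 *
 *	static struct at91_mmc_data __initdata board_mmc_data = {
 *		.slot_b		= 0,		// use slot A
 *		.wire4		= 1,		// 4-bit bus where the CPU supports it
 *		.det_pin	= AT91_PIN_PC9,	// card detect (example pin)
 *		.wp_pin		= AT91_PIN_PC4,	// write protect (example pin)
 *		.vcc_pin	= 0,		// no card power switch
 *	};
 *
 *	at91_add_device_mmc(0, &board_mmc_data);
 */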
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/gpio.h>
74
75 #include <mach/board.h>
76 #include <mach/cpu.h>
77 #include <mach/at91_mci.h>
78
79 #define DRIVER_NAME "at91_mci"
80
81 #define FL_SENT_COMMAND (1 << 0)
82 #define FL_SENT_STOP (1 << 1)
83
84 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
85 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
86 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
87
88 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
89 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
90
91 #define MCI_BLKSIZE 512
92 #define MCI_MAXBLKSIZE 4095
93 #define MCI_BLKATONCE 256
94 #define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE)
95
96 /*
97 * Low level type for this driver
98 */
99 struct at91mci_host
100 {
101 struct mmc_host *mmc;
102 struct mmc_command *cmd;
103 struct mmc_request *request;
104
105 void __iomem *baseaddr;
106 int irq;
107
108 struct at91_mmc_data *board;
109 int present;
110
111 struct clk *mci_clk;
112
113 /*
114 * Flag indicating when the command has been sent. This is used to
115 * work out whether or not to send the stop command
116 */
117 unsigned int flags;
118 /* flag for current bus settings */
119 u32 bus_mode;
120
121 /* DMA buffer used for transmitting */
122 unsigned int* buffer;
123 dma_addr_t physical_address;
124 unsigned int total_length;
125
126 /* Next scatterlist entry to be cleaned up (unmapped) once its transfer has completed */
127 int in_use_index;
128
129 /* Next scatterlist entry to be queued to the PDC for transfer */
130 int transfer_index;
131
132 /* Timer for timeouts */
133 struct timer_list timer;
134 };
135
136 /*
137 * Reset the controller and restore most of the state
138 */
139 static void at91_reset_host(struct at91mci_host *host)
140 {
141 unsigned long flags;
142 u32 mr;
143 u32 sdcr;
144 u32 dtor;
145 u32 imr;
146
147 local_irq_save(flags);
148 imr = at91_mci_read(host, AT91_MCI_IMR);
149
150 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
151
152 /* save current state */
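/* the 0x7fff mask keeps the clock/timing configuration but drops PDCMODE
 * and the block length, which are reprogrammed for every transfer */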
153 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
154 sdcr = at91_mci_read(host, AT91_MCI_SDCR);
155 dtor = at91_mci_read(host, AT91_MCI_DTOR);
156
157 /* reset the controller */
158 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
159
160 /* restore state */
161 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
162 at91_mci_write(host, AT91_MCI_MR, mr);
163 at91_mci_write(host, AT91_MCI_SDCR, sdcr);
164 at91_mci_write(host, AT91_MCI_DTOR, dtor);
165 at91_mci_write(host, AT91_MCI_IER, imr);
166
167 /* make sure sdio interrupts will fire */
168 at91_mci_read(host, AT91_MCI_SR);
169
170 local_irq_restore(flags);
171 }
172
173 static void at91_timeout_timer(unsigned long data)
174 {
175 struct at91mci_host *host;
176
177 host = (struct at91mci_host *)data;
178
179 if (host->request) {
180 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
181
182 if (host->cmd && host->cmd->data) {
183 host->cmd->data->error = -ETIMEDOUT;
184 } else {
185 if (host->cmd)
186 host->cmd->error = -ETIMEDOUT;
187 else
188 host->request->cmd->error = -ETIMEDOUT;
189 }
190
191 at91_reset_host(host);
192 mmc_request_done(host->mmc, host->request);
193 }
194 }
195
196 /*
197 * Copy from sg to a dma block - used for transfers
198 */
199 static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
200 {
201 unsigned int len, i, size;
202 unsigned *dmabuf = host->buffer;
203
204 size = data->blksz * data->blocks;
205 len = data->sg_len;
206
207 /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
208 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
209 if (host->total_length == 12)
210 memset(dmabuf, 0, 12);
211
212 /*
213 * Loop through all entries. The requested size may be
214 * smaller than the whole scatterlist, so make sure that
215 * we do not transfer too much.
216 */
217 for (i = 0; i < len; i++) {
218 struct scatterlist *sg;
219 int amount;
220 unsigned int *sgbuffer;
221
222 sg = &data->sg[i];
223
224 sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
225 amount = min(size, sg->length);
226 size -= amount;
227
228 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
229 int index;
230
231 for (index = 0; index < (amount / 4); index++)
232 *dmabuf++ = swab32(sgbuffer[index]);
233 } else {
234 char *tmpv = (char *)dmabuf;
235 memcpy(tmpv, sgbuffer, amount);
236 tmpv += amount;
237 dmabuf = (unsigned *)tmpv;
238 }
239
240 kunmap_atomic(((void *)sgbuffer) - sg->offset, KM_BIO_SRC_IRQ);
241
242 if (size == 0)
243 break;
244 }
245
246 /*
247 * Check that we didn't get a request to transfer
248 * more data than can fit into the SG list.
249 */
250 BUG_ON(size != 0);
251 }
252
253 /*
254 * Prepare a dma read
255 */
256 static void at91_mci_pre_dma_read(struct at91mci_host *host)
257 {
258 int i;
259 struct scatterlist *sg;
260 struct mmc_command *cmd;
261 struct mmc_data *data;
262
263 pr_debug("pre dma read\n");
264
265 cmd = host->cmd;
266 if (!cmd) {
267 pr_debug("no command\n");
268 return;
269 }
270
271 data = cmd->data;
272 if (!data) {
273 pr_debug("no data\n");
274 return;
275 }
276
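/* The PDC has a "current" and a "next" buffer register pair, so up to two
 * scatterlist entries can be queued per call: pass 0 fills RPR/RCR, pass 1
 * fills RNPR/RNCR. */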
277 for (i = 0; i < 2; i++) {
278 /* nothing left to transfer */
279 if (host->transfer_index >= data->sg_len) {
280 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
281 break;
282 }
283
284 /* Check to see if this needs filling */
285 if (i == 0) {
286 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
287 pr_debug("Transfer active in current\n");
288 continue;
289 }
290 }
291 else {
292 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
293 pr_debug("Transfer active in next\n");
294 continue;
295 }
296 }
297
298 /* Setup the next transfer */
299 pr_debug("Using transfer index %d\n", host->transfer_index);
300
301 sg = &data->sg[host->transfer_index++];
302 pr_debug("sg = %p\n", sg);
303
304 sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
305
306 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
307
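/* The PDC counter is in 32-bit words, unless the block size is not word
 * aligned, in which case force-byte mode (PDCFBYTE) is used and the
 * counter is in bytes. */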
308 if (i == 0) {
309 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
310 at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
311 }
312 else {
313 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
314 at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
315 }
316 }
317
318 pr_debug("pre dma read done\n");
319 }
320
321 /*
322 * Handle after a dma read
323 */
324 static void at91_mci_post_dma_read(struct at91mci_host *host)
325 {
326 struct mmc_command *cmd;
327 struct mmc_data *data;
328
329 pr_debug("post dma read\n");
330
331 cmd = host->cmd;
332 if (!cmd) {
333 pr_debug("no command\n");
334 return;
335 }
336
337 data = cmd->data;
338 if (!data) {
339 pr_debug("no data\n");
340 return;
341 }
342
343 while (host->in_use_index < host->transfer_index) {
344 struct scatterlist *sg;
345
346 pr_debug("finishing index %d\n", host->in_use_index);
347
348 sg = &data->sg[host->in_use_index++];
349
350 pr_debug("Unmapping page %08X\n", sg->dma_address);
351
352 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
353
354 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
355 unsigned int *buffer;
356 int index;
357
358 /* Swap the contents of the buffer */
359 buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
360 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
361
362 for (index = 0; index < (sg->length / 4); index++)
363 buffer[index] = swab32(buffer[index]);
364
365 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
366 }
367
368 flush_dcache_page(sg_page(sg));
369
370 data->bytes_xfered += sg->length;
371 }
372
373 /* Is there another transfer to trigger? */
374 if (host->transfer_index < data->sg_len)
375 at91_mci_pre_dma_read(host);
376 else {
377 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
378 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
379 }
380
381 pr_debug("post dma read done\n");
382 }
383
384 /*
385 * Handle transmitted data
386 */
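/*
 * A write raises TXBUFE once the PDC has drained the bounce buffer; a
 * single-block write then waits for NOTBUSY, while a multi-block write
 * waits for BLKE first (see the interrupt handler below).
 */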
387 static void at91_mci_handle_transmitted(struct at91mci_host *host)
388 {
389 struct mmc_command *cmd;
390 struct mmc_data *data;
391
392 pr_debug("Handling the transmit\n");
393
394 /* Disable the transfer */
395 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
396
397 /* Now wait for cmd ready */
398 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
399
400 cmd = host->cmd;
401 if (!cmd) return;
402
403 data = cmd->data;
404 if (!data) return;
405
406 if (cmd->data->blocks > 1) {
407 pr_debug("multiple write : wait for BLKE...\n");
408 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
409 } else
410 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
411 }
412
413 /*
414 * Update the bytes transferred count during a write operation
415 */
416 static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
417 {
418 struct mmc_data *data;
419
420 /* always deal with the effective request (and not the current cmd) */
421
422 if (host->request->cmd && host->request->cmd->error != 0)
423 return;
424
425 if (host->request->data) {
426 data = host->request->data;
427 if (data->flags & MMC_DATA_WRITE) {
428 /* the card has finished programming and is no longer busy */
429 pr_debug("-> bytes_xfered %d, total_length = %d\n",
430 data->bytes_xfered, host->total_length);
431 data->bytes_xfered = data->blksz * data->blocks;
432 }
433 }
434 }
435
436
437 /* Handle the CMDRDY (command ready) interrupt */
438 static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
439 {
440 if (!host->cmd)
441 return 1;
442 else if (!host->cmd->data) {
443 if (host->flags & FL_SENT_STOP) {
444 /* After a multi-block write, we must wait for NOTBUSY */
445 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
446 } else return 1;
447 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
448 /* After sending the multi-block-write command, start the DMA transfer */
449 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
450 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
451 }
452
453 /* command not completed, have to wait */
454 return 0;
455 }
456
457
458 /*
459 * Enable the controller
460 */
461 static void at91_mci_enable(struct at91mci_host *host)
462 {
463 unsigned int mr;
464
465 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
466 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
467 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
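/* 0x34a appears to set an initial clock divider (CLKDIV = 0x4a) and
 * power-saving divider (PWSDIV = 3); the real divider is programmed
 * later in at91_mci_set_ios() */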
468 mr = AT91_MCI_PDCMODE | 0x34a;
469
470 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
471 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
472
473 at91_mci_write(host, AT91_MCI_MR, mr);
474
475 /* use slot A or B (only one at a time) */
476 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
477 }
478
479 /*
480 * Disable the controller
481 */
482 static void at91_mci_disable(struct at91mci_host *host)
483 {
484 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
485 }
486
487 /*
488 * Send a command
489 */
490 static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
491 {
492 unsigned int cmdr, mr;
493 unsigned int block_length;
494 struct mmc_data *data = cmd->data;
495
496 unsigned int blocks;
497 unsigned int ier = 0;
498
499 host->cmd = cmd;
500
501 /* Needed for leaving busy state before CMD1 */
502 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
503 pr_debug("Clearing timeout\n");
504 at91_mci_write(host, AT91_MCI_ARGR, 0);
505 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
506 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
507 /* spin */
508 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
509 }
510 }
511
512 cmdr = cmd->opcode;
513
514 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
515 cmdr |= AT91_MCI_RSPTYP_NONE;
516 else {
517 /* if a response is expected then allow maximum response latency */
518 cmdr |= AT91_MCI_MAXLAT;
519 /* set 136 bit response for R2, 48 bit response otherwise */
520 if (mmc_resp_type(cmd) == MMC_RSP_R2)
521 cmdr |= AT91_MCI_RSPTYP_136;
522 else
523 cmdr |= AT91_MCI_RSPTYP_48;
524 }
525
526 if (data) {
527
528 if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
529 if (data->blksz & 0x3) {
530 pr_debug("Unsupported block size\n");
531 cmd->error = -EINVAL;
532 mmc_request_done(host->mmc, host->request);
533 return;
534 }
535 if (data->flags & MMC_DATA_STREAM) {
536 pr_debug("Stream commands not supported\n");
537 cmd->error = -EINVAL;
538 mmc_request_done(host->mmc, host->request);
539 return;
540 }
541 }
542
543 block_length = data->blksz;
544 blocks = data->blocks;
545
546 /* always set data start - also set direction flag for read */
547 if (data->flags & MMC_DATA_READ)
548 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
549 else if (data->flags & MMC_DATA_WRITE)
550 cmdr |= AT91_MCI_TRCMD_START;
551
552 if (data->flags & MMC_DATA_STREAM)
553 cmdr |= AT91_MCI_TRTYP_STREAM;
554 if (data->blocks > 1)
555 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
556 }
557 else {
558 block_length = 0;
559 blocks = 0;
560 }
561
562 if (host->flags & FL_SENT_STOP)
563 cmdr |= AT91_MCI_TRCMD_STOP;
564
565 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
566 cmdr |= AT91_MCI_OPDCMD;
567
568 /*
569 * Set the arguments and send the command
570 */
571 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
572 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
573
574 if (!data) {
575 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
576 at91_mci_write(host, ATMEL_PDC_RPR, 0);
577 at91_mci_write(host, ATMEL_PDC_RCR, 0);
578 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
579 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
580 at91_mci_write(host, ATMEL_PDC_TPR, 0);
581 at91_mci_write(host, ATMEL_PDC_TCR, 0);
582 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
583 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
584 ier = AT91_MCI_CMDRDY;
585 } else {
586 /* clear the block length, PDCMODE and PDCFBYTE bits before reprogramming them */
587 mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
588 mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
589 mr |= (block_length << 16);
590 mr |= AT91_MCI_PDCMODE;
591 at91_mci_write(host, AT91_MCI_MR, mr);
592
593 if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
594 at91_mci_write(host, AT91_MCI_BLKR,
595 AT91_MCI_BLKR_BCNT(blocks) |
596 AT91_MCI_BLKR_BLKLEN(block_length));
597
598 /*
599 * Disable the PDC controller
600 */
601 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
602
603 if (cmdr & AT91_MCI_TRCMD_START) {
604 data->bytes_xfered = 0;
605 host->transfer_index = 0;
606 host->in_use_index = 0;
607 if (cmdr & AT91_MCI_TRDIR) {
608 /*
609 * Handle a read
610 */
611 host->total_length = 0;
612
613 at91_mci_pre_dma_read(host);
614 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
615 }
616 else {
617 /*
618 * Handle a write
619 */
620 host->total_length = block_length * blocks;
621 /*
622 * AT91SAM926[0/3] Data Write Operation and
623 * number of bytes erratum
624 */
625 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
626 if (host->total_length < 12)
627 host->total_length = 12;
628
629 at91_mci_sg_to_dma(host, data);
630
631 pr_debug("Transmitting %d bytes\n", host->total_length);
632
633 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
634 at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
635 host->total_length : host->total_length / 4);
636
637 ier = AT91_MCI_CMDRDY;
638 }
639 }
640 }
641
642 /*
643 * Send the command and then enable the PDC - not the other way round as
644 * the data sheet says
645 */
646
647 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
648 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
649
650 if (cmdr & AT91_MCI_TRCMD_START) {
651 if (cmdr & AT91_MCI_TRDIR)
652 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
653 }
654
655 /* Enable selected interrupts */
656 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
657 }
658
659 /*
660 * Process the next step in the request
661 */
662 static void at91_mci_process_next(struct at91mci_host *host)
663 {
664 if (!(host->flags & FL_SENT_COMMAND)) {
665 host->flags |= FL_SENT_COMMAND;
666 at91_mci_send_command(host, host->request->cmd);
667 }
668 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
669 host->flags |= FL_SENT_STOP;
670 at91_mci_send_command(host, host->request->stop);
671 } else {
672 del_timer(&host->timer);
673 /* the at91rm9200 mci controller hangs after some transfers,
674 * and the workaround is to reset it after each transfer.
675 */
676 if (cpu_is_at91rm9200())
677 at91_reset_host(host);
678 mmc_request_done(host->mmc, host->request);
679 }
680 }
681
682 /*
683 * Handle a command that has been completed
684 */
685 static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
686 {
687 struct mmc_command *cmd = host->cmd;
688 struct mmc_data *data = cmd->data;
689
690 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
691
692 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
693 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
694 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
695 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
696
697 pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
698 status, at91_mci_read(host, AT91_MCI_SR),
699 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
700
701 if (status & AT91_MCI_ERRORS) {
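/* A response CRC error is ignored when the expected response carries
 * no valid CRC (e.g. the R3/OCR response, whose CRC field is fixed) */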
702 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
703 cmd->error = 0;
704 }
705 else {
706 if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
707 if (data) {
708 if (status & AT91_MCI_DTOE)
709 data->error = -ETIMEDOUT;
710 else if (status & AT91_MCI_DCRCE)
711 data->error = -EILSEQ;
712 }
713 } else {
714 if (status & AT91_MCI_RTOE)
715 cmd->error = -ETIMEDOUT;
716 else if (status & AT91_MCI_RCRCE)
717 cmd->error = -EILSEQ;
718 else
719 cmd->error = -EIO;
720 }
721
722 pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
723 cmd->error, data ? data->error : 0,
724 cmd->opcode, cmd->retries);
725 }
726 }
727 else
728 cmd->error = 0;
729
730 at91_mci_process_next(host);
731 }
732
733 /*
734 * Handle an MMC request
735 */
736 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
737 {
738 struct at91mci_host *host = mmc_priv(mmc);
739 host->request = mrq;
740 host->flags = 0;
741
742 /* more than 1s timeout needed with slow SD cards */
743 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
744
745 at91_mci_process_next(host);
746 }
747
748 /*
749 * Set the IOS
750 */
751 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
752 {
753 int clkdiv;
754 struct at91mci_host *host = mmc_priv(mmc);
755 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
756
757 host->bus_mode = ios->bus_mode;
758
759 if (ios->clock == 0) {
760 /* Disable the MCI controller */
761 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
762 clkdiv = 0;
763 }
764 else {
765 /* Enable the MCI controller */
766 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
767
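/* The card clock is MCK / (2 * (CLKDIV + 1)); round the divider so
 * that the resulting clock never exceeds the requested ios->clock */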
768 if ((at91_master_clock % (ios->clock * 2)) == 0)
769 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
770 else
771 clkdiv = (at91_master_clock / ios->clock) / 2;
772
773 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
774 at91_master_clock / (2 * (clkdiv + 1)));
775 }
776 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
777 pr_debug("MMC: Setting controller bus width to 4\n");
778 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
779 }
780 else {
781 pr_debug("MMC: Setting controller bus width to 1\n");
782 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
783 }
784
785 /* Set the clock divider */
786 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
787
788 /* maybe switch power to the card */
789 if (host->board->vcc_pin) {
790 switch (ios->power_mode) {
791 case MMC_POWER_OFF:
792 gpio_set_value(host->board->vcc_pin, 0);
793 break;
794 case MMC_POWER_UP:
795 gpio_set_value(host->board->vcc_pin, 1);
796 break;
797 case MMC_POWER_ON:
798 break;
799 default:
800 WARN_ON(1);
801 }
802 }
803 }
804
805 /*
806 * Handle an interrupt
807 */
808 static irqreturn_t at91_mci_irq(int irq, void *devid)
809 {
810 struct at91mci_host *host = devid;
811 int completed = 0;
812 unsigned int int_status, int_mask;
813
814 int_status = at91_mci_read(host, AT91_MCI_SR);
815 int_mask = at91_mci_read(host, AT91_MCI_IMR);
816
817 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
818 int_status & int_mask);
819
820 int_status = int_status & int_mask;
821
822 if (int_status & AT91_MCI_ERRORS) {
823 completed = 1;
824
825 if (int_status & AT91_MCI_UNRE)
826 pr_debug("MMC: Underrun error\n");
827 if (int_status & AT91_MCI_OVRE)
828 pr_debug("MMC: Overrun error\n");
829 if (int_status & AT91_MCI_DTOE)
830 pr_debug("MMC: Data timeout\n");
831 if (int_status & AT91_MCI_DCRCE)
832 pr_debug("MMC: CRC error in data\n");
833 if (int_status & AT91_MCI_RTOE)
834 pr_debug("MMC: Response timeout\n");
835 if (int_status & AT91_MCI_RENDE)
836 pr_debug("MMC: Response end bit error\n");
837 if (int_status & AT91_MCI_RCRCE)
838 pr_debug("MMC: Response CRC error\n");
839 if (int_status & AT91_MCI_RDIRE)
840 pr_debug("MMC: Response direction error\n");
841 if (int_status & AT91_MCI_RINDE)
842 pr_debug("MMC: Response index error\n");
843 } else {
844 /* Only continue processing if no errors */
845
846 if (int_status & AT91_MCI_TXBUFE) {
847 pr_debug("TX buffer empty\n");
848 at91_mci_handle_transmitted(host);
849 }
850
851 if (int_status & AT91_MCI_ENDRX) {
852 pr_debug("ENDRX\n");
853 at91_mci_post_dma_read(host);
854 }
855
856 if (int_status & AT91_MCI_RXBUFF) {
857 pr_debug("RX buffer full\n");
858 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
859 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
860 completed = 1;
861 }
862
863 if (int_status & AT91_MCI_ENDTX)
864 pr_debug("Transmit has ended\n");
865
866 if (int_status & AT91_MCI_NOTBUSY) {
867 pr_debug("Card is ready\n");
868 at91_mci_update_bytes_xfered(host);
869 completed = 1;
870 }
871
872 if (int_status & AT91_MCI_DTIP)
873 pr_debug("Data transfer in progress\n");
874
875 if (int_status & AT91_MCI_BLKE) {
876 pr_debug("Block transfer has ended\n");
877 if (host->request->data && host->request->data->blocks > 1) {
878 /* multi-block write: complete the write
879 * command and send the stop command */
880 completed = 1;
881 } else {
882 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
883 }
884 }
885
886 if (int_status & AT91_MCI_SDIOIRQA)
887 mmc_signal_sdio_irq(host->mmc);
888
889 if (int_status & AT91_MCI_SDIOIRQB)
890 mmc_signal_sdio_irq(host->mmc);
891
892 if (int_status & AT91_MCI_TXRDY)
893 pr_debug("Ready to transmit\n");
894
895 if (int_status & AT91_MCI_RXRDY)
896 pr_debug("Ready to receive\n");
897
898 if (int_status & AT91_MCI_CMDRDY) {
899 pr_debug("Command ready\n");
900 completed = at91_mci_handle_cmdrdy(host);
901 }
902 }
903
904 if (completed) {
905 pr_debug("Completed command\n");
906 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
907 at91_mci_completed_command(host, int_status);
908 } else
909 at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
910
911 return IRQ_HANDLED;
912 }
913
914 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
915 {
916 struct at91mci_host *host = _host;
917 int present = !gpio_get_value(irq_to_gpio(irq));
918
919 /*
920 * we expect this irq on both insert and remove,
921 * and use a short delay to debounce.
922 */
923 if (present != host->present) {
924 host->present = present;
925 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
926 present ? "insert" : "remove");
927 if (!present) {
928 pr_debug("****** Resetting SD-card bus width ******\n");
929 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
930 }
931 /* 0.5s needed because of early card detect switch firing */
932 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
933 }
934 return IRQ_HANDLED;
935 }
936
937 static int at91_mci_get_ro(struct mmc_host *mmc)
938 {
939 struct at91mci_host *host = mmc_priv(mmc);
940
941 if (host->board->wp_pin)
942 return !!gpio_get_value(host->board->wp_pin);
943 /*
944 * Board doesn't support read only detection; let the mmc core
945 * decide what to do.
946 */
947 return -ENOSYS;
948 }
949
950 static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
951 {
952 struct at91mci_host *host = mmc_priv(mmc);
953
954 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
955 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
956 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
957 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
958
959 }
960
961 static const struct mmc_host_ops at91_mci_ops = {
962 .request = at91_mci_request,
963 .set_ios = at91_mci_set_ios,
964 .get_ro = at91_mci_get_ro,
965 .enable_sdio_irq = at91_mci_enable_sdio_irq,
966 };
967
968 /*
969 * Probe for the device
970 */
971 static int __init at91_mci_probe(struct platform_device *pdev)
972 {
973 struct mmc_host *mmc;
974 struct at91mci_host *host;
975 struct resource *res;
976 int ret;
977
978 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
979 if (!res)
980 return -ENXIO;
981
982 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
983 return -EBUSY;
984
985 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
986 if (!mmc) {
987 ret = -ENOMEM;
988 dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
989 goto fail6;
990 }
991
992 mmc->ops = &at91_mci_ops;
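/* 375 kHz keeps the minimum clock within the card identification range;
 * 25 MHz is the standard full-speed SD limit */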
993 mmc->f_min = 375000;
994 mmc->f_max = 25000000;
995 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
996 mmc->caps = MMC_CAP_SDIO_IRQ;
997
998 mmc->max_blk_size = MCI_MAXBLKSIZE;
999 mmc->max_blk_count = MCI_BLKATONCE;
1000 mmc->max_req_size = MCI_BUFSIZE;
1001
1002 host = mmc_priv(mmc);
1003 host->mmc = mmc;
1004 host->bus_mode = 0;
1005 host->board = pdev->dev.platform_data;
1006 if (host->board->wire4) {
1007 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
1008 mmc->caps |= MMC_CAP_4_BIT_DATA;
1009 else
1010 dev_warn(&pdev->dev, "4 wire bus mode not supported"
1011 " - using 1 wire\n");
1012 }
1013
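/* Single coherent bounce buffer used for writes: at91_mci_sg_to_dma()
 * copies the scatterlist into it before the PDC transfer is started */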
1014 host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
1015 &host->physical_address, GFP_KERNEL);
1016 if (!host->buffer) {
1017 ret = -ENOMEM;
1018 dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
1019 goto fail5;
1020 }
1021
1022 /*
1023 * Reserve GPIOs ... board init code makes sure these pins are set
1024 * up as GPIOs with the right direction (input, except for vcc)
1025 */
1026 if (host->board->det_pin) {
1027 ret = gpio_request(host->board->det_pin, "mmc_detect");
1028 if (ret < 0) {
1029 dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
1030 goto fail4b;
1031 }
1032 }
1033 if (host->board->wp_pin) {
1034 ret = gpio_request(host->board->wp_pin, "mmc_wp");
1035 if (ret < 0) {
1036 dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
1037 goto fail4;
1038 }
1039 }
1040 if (host->board->vcc_pin) {
1041 ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
1042 if (ret < 0) {
1043 dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
1044 goto fail3;
1045 }
1046 }
1047
1048 /*
1049 * Get Clock
1050 */
1051 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
1052 if (IS_ERR(host->mci_clk)) {
1053 ret = -ENODEV;
1054 dev_dbg(&pdev->dev, "no mci_clk?\n");
1055 goto fail2;
1056 }
1057
1058 /*
1059 * Map I/O region
1060 */
1061 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
1062 if (!host->baseaddr) {
1063 ret = -ENOMEM;
1064 goto fail1;
1065 }
1066
1067 /*
1068 * Reset hardware
1069 */
1070 clk_enable(host->mci_clk); /* Enable the peripheral clock */
1071 at91_mci_disable(host);
1072 at91_mci_enable(host);
1073
1074 /*
1075 * Allocate the MCI interrupt
1076 */
1077 host->irq = platform_get_irq(pdev, 0);
1078 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
1079 mmc_hostname(mmc), host);
1080 if (ret) {
1081 dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
1082 goto fail0;
1083 }
1084
1085 setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
1086
1087 platform_set_drvdata(pdev, mmc);
1088
1089 /*
1090 * Add host to MMC layer
1091 */
1092 if (host->board->det_pin) {
1093 host->present = !gpio_get_value(host->board->det_pin);
1094 }
1095 else
1096 host->present = -1;
1097
1098 mmc_add_host(mmc);
1099
1100 /*
1101 * monitor card insertion/removal if we can
1102 */
1103 if (host->board->det_pin) {
1104 ret = request_irq(gpio_to_irq(host->board->det_pin),
1105 at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
1106 if (ret)
1107 dev_warn(&pdev->dev, "request MMC detect irq failed\n");
1108 else
1109 device_init_wakeup(&pdev->dev, 1);
1110 }
1111
1112 pr_debug("Added MCI driver\n");
1113
1114 return 0;
1115
1116 fail0:
1117 clk_disable(host->mci_clk);
1118 iounmap(host->baseaddr);
1119 fail1:
1120 clk_put(host->mci_clk);
1121 fail2:
1122 if (host->board->vcc_pin)
1123 gpio_free(host->board->vcc_pin);
1124 fail3:
1125 if (host->board->wp_pin)
1126 gpio_free(host->board->wp_pin);
1127 fail4:
1128 if (host->board->det_pin)
1129 gpio_free(host->board->det_pin);
1130 fail4b:
1131 if (host->buffer)
1132 dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
1133 host->buffer, host->physical_address);
1134 fail5:
1135 mmc_free_host(mmc);
1136 fail6:
1137 release_mem_region(res->start, res->end - res->start + 1);
1138 dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1139 return ret;
1140 }
1141
1142 /*
1143 * Remove a device
1144 */
1145 static int __exit at91_mci_remove(struct platform_device *pdev)
1146 {
1147 struct mmc_host *mmc = platform_get_drvdata(pdev);
1148 struct at91mci_host *host;
1149 struct resource *res;
1150
1151 if (!mmc)
1152 return -1;
1153
1154 host = mmc_priv(mmc);
1155
1156 if (host->buffer)
1157 dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
1158 host->buffer, host->physical_address);
1159
1160 if (host->board->det_pin) {
1161 if (device_can_wakeup(&pdev->dev))
1162 free_irq(gpio_to_irq(host->board->det_pin), host);
1163 device_init_wakeup(&pdev->dev, 0);
1164 gpio_free(host->board->det_pin);
1165 }
1166
1167 at91_mci_disable(host);
1168 del_timer_sync(&host->timer);
1169 mmc_remove_host(mmc);
1170 free_irq(host->irq, host);
1171
1172 clk_disable(host->mci_clk); /* Disable the peripheral clock */
1173 clk_put(host->mci_clk);
1174
1175 if (host->board->vcc_pin)
1176 gpio_free(host->board->vcc_pin);
1177 if (host->board->wp_pin)
1178 gpio_free(host->board->wp_pin);
1179
1180 iounmap(host->baseaddr);
1181 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1182 release_mem_region(res->start, res->end - res->start + 1);
1183
1184 mmc_free_host(mmc);
1185 platform_set_drvdata(pdev, NULL);
1186 pr_debug("MCI Removed\n");
1187
1188 return 0;
1189 }
1190
1191 #ifdef CONFIG_PM
1192 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1193 {
1194 struct mmc_host *mmc = platform_get_drvdata(pdev);
1195 struct at91mci_host *host = mmc_priv(mmc);
1196 int ret = 0;
1197
1198 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1199 enable_irq_wake(host->board->det_pin);
1200
1201 if (mmc)
1202 ret = mmc_suspend_host(mmc, state);
1203
1204 return ret;
1205 }
1206
1207 static int at91_mci_resume(struct platform_device *pdev)
1208 {
1209 struct mmc_host *mmc = platform_get_drvdata(pdev);
1210 struct at91mci_host *host = mmc_priv(mmc);
1211 int ret = 0;
1212
1213 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1214 disable_irq_wake(host->board->det_pin);
1215
1216 if (mmc)
1217 ret = mmc_resume_host(mmc);
1218
1219 return ret;
1220 }
1221 #else
1222 #define at91_mci_suspend NULL
1223 #define at91_mci_resume NULL
1224 #endif
1225
1226 static struct platform_driver at91_mci_driver = {
1227 .remove = __exit_p(at91_mci_remove),
1228 .suspend = at91_mci_suspend,
1229 .resume = at91_mci_resume,
1230 .driver = {
1231 .name = DRIVER_NAME,
1232 .owner = THIS_MODULE,
1233 },
1234 };
1235
1236 static int __init at91_mci_init(void)
1237 {
1238 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
1239 }
1240
1241 static void __exit at91_mci_exit(void)
1242 {
1243 platform_driver_unregister(&at91_mci_driver);
1244 }
1245
1246 module_init(at91_mci_init);
1247 module_exit(at91_mci_exit);
1248
1249 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1250 MODULE_AUTHOR("Nick Randell");
1251 MODULE_LICENSE("GPL");
1252 MODULE_ALIAS("platform:at91_mci");