1 /*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/device.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/delay.h>
33 #include <linux/pnp.h>
34 #include <linux/highmem.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/protocol.h>
37
38 #include <asm/io.h>
39 #include <asm/dma.h>
40 #include <asm/scatterlist.h>
41
42 #include "wbsd.h"
43
44 #define DRIVER_NAME "wbsd"
45 #define DRIVER_VERSION "1.2"
46
47 #ifdef CONFIG_MMC_DEBUG
48 #define DBG(x...) \
49 printk(KERN_DEBUG DRIVER_NAME ": " x)
50 #define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
52 #else
53 #define DBG(x...) do { } while (0)
54 #define DBGF(x...) do { } while (0)
55 #endif
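/*
 * Illustrative note on the debug helpers above (not part of the driver
 * logic): DBGF() prefixes the message with the driver name and the calling
 * function, e.g.
 *
 *	DBGF("Sending cmd (%x)\n", cmd->opcode);
 *
 * prints "wbsd [wbsd_send_command()]: Sending cmd (...)" when
 * CONFIG_MMC_DEBUG is enabled, and expands to an empty statement otherwise.
 */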
56
57 /*
58 * Device resources
59 */
60
61 #ifdef CONFIG_PNP
62
63 static const struct pnp_device_id pnp_dev_table[] = {
64 { "WEC0517", 0 },
65 { "WEC0518", 0 },
66 { "", 0 },
67 };
68
69 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
70
71 #endif /* CONFIG_PNP */
72
73 static const int config_ports[] = { 0x2E, 0x4E };
74 static const int unlock_codes[] = { 0x83, 0x87 };
75
76 static const int valid_ids[] = {
77 0x7112,
78 };
79
80 #ifdef CONFIG_PNP
81 static unsigned int nopnp = 0;
82 #else
83 static const unsigned int nopnp = 1;
84 #endif
85 static unsigned int io = 0x248;
86 static unsigned int irq = 6;
87 static int dma = 2;
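/*
 * Example (illustrative only): the module parameters above allow the PnP
 * probe to be bypassed and the resources to be given explicitly, e.g.
 *
 *	modprobe wbsd nopnp=1 io=0x248 irq=6 dma=2
 *
 * The values shown here are simply the built-in defaults.
 */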
88
89 /*
90 * Basic functions
91 */
92
93 static inline void wbsd_unlock_config(struct wbsd_host* host)
94 {
95 BUG_ON(host->config == 0);
96
97 outb(host->unlock_code, host->config);
98 outb(host->unlock_code, host->config);
99 }
100
101 static inline void wbsd_lock_config(struct wbsd_host* host)
102 {
103 BUG_ON(host->config == 0);
104
105 outb(LOCK_CODE, host->config);
106 }
107
108 static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
109 {
110 BUG_ON(host->config == 0);
111
112 outb(reg, host->config);
113 outb(value, host->config + 1);
114 }
115
116 static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
117 {
118 BUG_ON(host->config == 0);
119
120 outb(reg, host->config);
121 return inb(host->config + 1);
122 }
123
124 static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
125 {
126 outb(index, host->base + WBSD_IDXR);
127 outb(value, host->base + WBSD_DATAR);
128 }
129
130 static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
131 {
132 outb(index, host->base + WBSD_IDXR);
133 return inb(host->base + WBSD_DATAR);
134 }
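/*
 * Note on the two register spaces used above: the Super I/O configuration
 * registers are reached by writing an index to host->config and accessing
 * the value at host->config + 1, while the SD/MMC function's own indexed
 * registers go through the WBSD_IDXR/WBSD_DATAR pair at host->base. Both
 * follow the same index/data access pattern.
 */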
135
136 /*
137 * Common routines
138 */
139
140 static void wbsd_init_device(struct wbsd_host* host)
141 {
142 u8 setup, ier;
143
144 /*
145 * Reset chip (SD/MMC part) and fifo.
146 */
147 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
148 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
149 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
150
151 /*
152 * Set DAT3 to input
153 */
154 setup &= ~WBSD_DAT3_H;
155 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
156 host->flags &= ~WBSD_FIGNORE_DETECT;
157
158 /*
159 * Read back default clock.
160 */
161 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
162
163 /*
164 * Power down port.
165 */
166 outb(WBSD_POWER_N, host->base + WBSD_CSR);
167
168 /*
169 * Set maximum timeout.
170 */
171 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
172
173 /*
174 * Test for card presence
175 */
176 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
177 host->flags |= WBSD_FCARD_PRESENT;
178 else
179 host->flags &= ~WBSD_FCARD_PRESENT;
180
181 /*
182 * Enable interesting interrupts.
183 */
184 ier = 0;
185 ier |= WBSD_EINT_CARD;
186 ier |= WBSD_EINT_FIFO_THRE;
187 ier |= WBSD_EINT_CCRC;
188 ier |= WBSD_EINT_TIMEOUT;
189 ier |= WBSD_EINT_CRC;
190 ier |= WBSD_EINT_TC;
191
192 outb(ier, host->base + WBSD_EIR);
193
194 /*
195 * Clear interrupts.
196 */
197 inb(host->base + WBSD_ISR);
198 }
199
200 static void wbsd_reset(struct wbsd_host* host)
201 {
202 u8 setup;
203
204 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
205
206 /*
207 * Soft reset of chip (SD/MMC part).
208 */
209 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
210 setup |= WBSD_SOFT_RESET;
211 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
212 }
213
214 static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
215 {
216 unsigned long dmaflags;
217
218 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
219
220 if (host->dma >= 0)
221 {
222 /*
223 * Release ISA DMA controller.
224 */
225 dmaflags = claim_dma_lock();
226 disable_dma(host->dma);
227 clear_dma_ff(host->dma);
228 release_dma_lock(dmaflags);
229
230 /*
231 * Disable DMA on host.
232 */
233 wbsd_write_index(host, WBSD_IDX_DMA, 0);
234 }
235
236 host->mrq = NULL;
237
238 /*
239  * The MMC layer might call back into the driver, so unlock first.
240 */
241 spin_unlock(&host->lock);
242 mmc_request_done(host->mmc, mrq);
243 spin_lock(&host->lock);
244 }
245
246 /*
247 * Scatter/gather functions
248 */
249
250 static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
251 {
252 /*
253  * Get info about the SG list from the data structure.
254 */
255 host->cur_sg = data->sg;
256 host->num_sg = data->sg_len;
257
258 host->offset = 0;
259 host->remain = host->cur_sg->length;
260 }
261
262 static inline int wbsd_next_sg(struct wbsd_host* host)
263 {
264 /*
265 * Skip to next SG entry.
266 */
267 host->cur_sg++;
268 host->num_sg--;
269
270 /*
271 * Any entries left?
272 */
273 if (host->num_sg > 0)
274 {
275 host->offset = 0;
276 host->remain = host->cur_sg->length;
277 }
278
279 return host->num_sg;
280 }
281
282 static inline char* wbsd_kmap_sg(struct wbsd_host* host)
283 {
284 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
285 host->cur_sg->offset;
286 return host->mapped_sg;
287 }
288
289 static inline void wbsd_kunmap_sg(struct wbsd_host* host)
290 {
291 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
292 }
293
294 static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
295 {
296 unsigned int len, i, size;
297 struct scatterlist* sg;
298 char* dmabuf = host->dma_buffer;
299 char* sgbuf;
300
301 size = host->size;
302
303 sg = data->sg;
304 len = data->sg_len;
305
306 /*
307  * Just loop through all the entries. The transfer size might be
308  * smaller than the entire list, so make sure we do not copy
309  * too much.
310 */
311 for (i = 0;i < len;i++)
312 {
313 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
314 if (size < sg[i].length)
315 memcpy(dmabuf, sgbuf, size);
316 else
317 memcpy(dmabuf, sgbuf, sg[i].length);
318 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
319 dmabuf += sg[i].length;
320
321 if (size < sg[i].length)
322 size = 0;
323 else
324 size -= sg[i].length;
325
326 if (size == 0)
327 break;
328 }
329
330 /*
331 * Check that we didn't get a request to transfer
332 * more data than can fit into the SG list.
333 */
334
335 BUG_ON(size != 0);
336
337 host->size -= size;
338 }
339
340 static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
341 {
342 unsigned int len, i, size;
343 struct scatterlist* sg;
344 char* dmabuf = host->dma_buffer;
345 char* sgbuf;
346
347 size = host->size;
348
349 sg = data->sg;
350 len = data->sg_len;
351
352 /*
353  * Just loop through all the entries. The transfer size might be
354  * smaller than the entire list, so make sure we do not copy
355  * too much.
356 */
357 for (i = 0;i < len;i++)
358 {
359 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
360 if (size < sg[i].length)
361 memcpy(sgbuf, dmabuf, size);
362 else
363 memcpy(sgbuf, dmabuf, sg[i].length);
364 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
365 dmabuf += sg[i].length;
366
367 if (size < sg[i].length)
368 size = 0;
369 else
370 size -= sg[i].length;
371
372 if (size == 0)
373 break;
374 }
375
376 /*
377 * Check that we didn't get a request to transfer
378 * more data than can fit into the SG list.
379 */
380
381 BUG_ON(size != 0);
382
383 host->size -= size;
384 }
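/*
 * The two helpers above exist because ISA DMA cannot address the scatter
 * list directly: the controller needs a single physically contiguous
 * buffer, below 16 MB and aligned to a 64 kB boundary (see
 * wbsd_request_dma()). Data is therefore bounced through host->dma_buffer,
 * copied out of the SG list before a write and back into it after a read.
 */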
385
386 /*
387 * Command handling
388 */
389
390 static inline void wbsd_get_short_reply(struct wbsd_host* host,
391 struct mmc_command* cmd)
392 {
393 /*
394 * Correct response type?
395 */
396 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
397 {
398 cmd->error = MMC_ERR_INVALID;
399 return;
400 }
401
402 cmd->resp[0] =
403 wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
404 cmd->resp[0] |=
405 wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
406 cmd->resp[0] |=
407 wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
408 cmd->resp[0] |=
409 wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
410 cmd->resp[1] =
411 wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
412 }
413
414 static inline void wbsd_get_long_reply(struct wbsd_host* host,
415 struct mmc_command* cmd)
416 {
417 int i;
418
419 /*
420 * Correct response type?
421 */
422 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
423 {
424 cmd->error = MMC_ERR_INVALID;
425 return;
426 }
427
428 for (i = 0;i < 4;i++)
429 {
430 cmd->resp[i] =
431 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
432 cmd->resp[i] |=
433 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
434 cmd->resp[i] |=
435 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
436 cmd->resp[i] |=
437 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
438 }
439 }
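/*
 * Layout note for the two reply helpers above: a short response is
 * assembled from index registers WBSD_IDX_RESP12..WBSD_IDX_RESP16, while a
 * long response is built from four groups of four registers starting at
 * WBSD_IDX_RESP1, one byte per register, most significant byte first.
 */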
440
441 static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
442 {
443 int i;
444 u8 status, isr;
445
446 DBGF("Sending cmd (%x)\n", cmd->opcode);
447
448 /*
449 * Clear accumulated ISR. The interrupt routine
450 * will fill this one with events that occur during
451 * transfer.
452 */
453 host->isr = 0;
454
455 /*
456 * Send the command (CRC calculated by host).
457 */
458 outb(cmd->opcode, host->base + WBSD_CMDR);
459 for (i = 3;i >= 0;i--)
460 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
461
462 cmd->error = MMC_ERR_NONE;
463
464 /*
465 * Wait for the request to complete.
466 */
467 do {
468 status = wbsd_read_index(host, WBSD_IDX_STATUS);
469 } while (status & WBSD_CARDTRAFFIC);
470
471 /*
472 * Do we expect a reply?
473 */
474 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
475 {
476 /*
477 * Read back status.
478 */
479 isr = host->isr;
480
481 /* Card removed? */
482 if (isr & WBSD_INT_CARD)
483 cmd->error = MMC_ERR_TIMEOUT;
484 /* Timeout? */
485 else if (isr & WBSD_INT_TIMEOUT)
486 cmd->error = MMC_ERR_TIMEOUT;
487 /* CRC? */
488 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
489 cmd->error = MMC_ERR_BADCRC;
490 /* All ok */
491 else
492 {
493 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
494 wbsd_get_short_reply(host, cmd);
495 else
496 wbsd_get_long_reply(host, cmd);
497 }
498 }
499
500 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
501 }
502
503 /*
504 * Data functions
505 */
506
507 static void wbsd_empty_fifo(struct wbsd_host* host)
508 {
509 struct mmc_data* data = host->mrq->cmd->data;
510 char* buffer;
511 int i, fsr, fifo;
512
513 /*
514  * Handle excess data: bail out if the entire buffer has already been transferred.
515 */
516 if (data->bytes_xfered == host->size)
517 return;
518
519 buffer = wbsd_kmap_sg(host) + host->offset;
520
521 /*
522 * Drain the fifo. This has a tendency to loop longer
523 * than the FIFO length (usually one block).
524 */
525 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
526 {
527 /*
528 * The size field in the FSR is broken so we have to
529 * do some guessing.
530 */
531 if (fsr & WBSD_FIFO_FULL)
532 fifo = 16;
533 else if (fsr & WBSD_FIFO_FUTHRE)
534 fifo = 8;
535 else
536 fifo = 1;
537
538 for (i = 0;i < fifo;i++)
539 {
540 *buffer = inb(host->base + WBSD_DFR);
541 buffer++;
542 host->offset++;
543 host->remain--;
544
545 data->bytes_xfered++;
546
547 /*
548 * Transfer done?
549 */
550 if (data->bytes_xfered == host->size)
551 {
552 wbsd_kunmap_sg(host);
553 return;
554 }
555
556 /*
557 * End of scatter list entry?
558 */
559 if (host->remain == 0)
560 {
561 wbsd_kunmap_sg(host);
562
563 /*
564 * Get next entry. Check if last.
565 */
566 if (!wbsd_next_sg(host))
567 {
568 /*
569 * We should never reach this point.
570 * It means that we're trying to
571 * transfer more blocks than can fit
572 * into the scatter list.
573 */
574 BUG_ON(1);
575
576 host->size = data->bytes_xfered;
577
578 return;
579 }
580
581 buffer = wbsd_kmap_sg(host);
582 }
583 }
584 }
585
586 wbsd_kunmap_sg(host);
587
588 /*
589 * This is a very dirty hack to solve a
590 * hardware problem. The chip doesn't trigger
591 * FIFO threshold interrupts properly.
592 */
593 if ((host->size - data->bytes_xfered) < 16)
594 tasklet_schedule(&host->fifo_tasklet);
595 }
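/*
 * FIFO sizing used above, since the size field in the FSR is unusable: a
 * full FIFO is assumed to hold 16 bytes, one at or above the threshold 8
 * bytes, and anything else is drained a single byte at a time before the
 * FSR is checked again.
 */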
596
597 static void wbsd_fill_fifo(struct wbsd_host* host)
598 {
599 struct mmc_data* data = host->mrq->cmd->data;
600 char* buffer;
601 int i, fsr, fifo;
602
603 /*
604 * Check that we aren't being called after the
605  * entire buffer has been transferred.
606 */
607 if (data->bytes_xfered == host->size)
608 return;
609
610 buffer = wbsd_kmap_sg(host) + host->offset;
611
612 /*
613 * Fill the fifo. This has a tendency to loop longer
614 * than the FIFO length (usually one block).
615 */
616 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
617 {
618 /*
619 * The size field in the FSR is broken so we have to
620 * do some guessing.
621 */
622 if (fsr & WBSD_FIFO_EMPTY)
623 fifo = 0;
624 else if (fsr & WBSD_FIFO_EMTHRE)
625 fifo = 8;
626 else
627 fifo = 15;
628
629 for (i = 16;i > fifo;i--)
630 {
631 outb(*buffer, host->base + WBSD_DFR);
632 buffer++;
633 host->offset++;
634 host->remain--;
635
636 data->bytes_xfered++;
637
638 /*
639 * Transfer done?
640 */
641 if (data->bytes_xfered == host->size)
642 {
643 wbsd_kunmap_sg(host);
644 return;
645 }
646
647 /*
648 * End of scatter list entry?
649 */
650 if (host->remain == 0)
651 {
652 wbsd_kunmap_sg(host);
653
654 /*
655 * Get next entry. Check if last.
656 */
657 if (!wbsd_next_sg(host))
658 {
659 /*
660 * We should never reach this point.
661 * It means that we're trying to
662 * transfer more blocks than can fit
663 * into the scatter list.
664 */
665 BUG_ON(1);
666
667 host->size = data->bytes_xfered;
668
669 return;
670 }
671
672 buffer = wbsd_kmap_sg(host);
673 }
674 }
675 }
676
677 wbsd_kunmap_sg(host);
678
679 /*
680 * The controller stops sending interrupts for
681 * 'FIFO empty' under certain conditions. So we
682 * need to be a bit more pro-active.
683 */
684 tasklet_schedule(&host->fifo_tasklet);
685 }
686
687 static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
688 {
689 u16 blksize;
690 u8 setup;
691 unsigned long dmaflags;
692
693 DBGF("blksz %04x blks %04x flags %08x\n",
694 1 << data->blksz_bits, data->blocks, data->flags);
695 DBGF("tsac %d ms nsac %d clk\n",
696 data->timeout_ns / 1000000, data->timeout_clks);
697
698 /*
699 * Calculate size.
700 */
701 host->size = data->blocks << data->blksz_bits;
702
703 /*
704 * Check timeout values for overflow.
705 * (Yes, some cards cause this value to overflow).
706 */
707 if (data->timeout_ns > 127000000)
708 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
709 else
710 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
711
712 if (data->timeout_clks > 255)
713 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
714 else
715 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
716
717 /*
718 * Inform the chip of how large blocks will be
719 * sent. It needs this to determine when to
720 * calculate CRC.
721 *
722 * Space for CRC must be included in the size.
723 */
724 blksize = (1 << data->blksz_bits) + 2;
725
726 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
727 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
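		/*
		 * Worked example: a 512-byte block is programmed as
		 * 512 + 2 = 514 (0x202), so 0x20 goes into WBSD_IDX_PBSMSB
		 * and 0x02 into WBSD_IDX_PBSLSB.
		 */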
728
729 /*
730 * Clear the FIFO. This is needed even for DMA
731 * transfers since the chip still uses the FIFO
732 * internally.
733 */
734 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
735 setup |= WBSD_FIFO_RESET;
736 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
737
738 /*
739 * DMA transfer?
740 */
741 if (host->dma >= 0)
742 {
743 /*
744 * The buffer for DMA is only 64 kB.
745 */
746 BUG_ON(host->size > 0x10000);
747 if (host->size > 0x10000)
748 {
749 data->error = MMC_ERR_INVALID;
750 return;
751 }
752
753 /*
754 * Transfer data from the SG list to
755 * the DMA buffer.
756 */
757 if (data->flags & MMC_DATA_WRITE)
758 wbsd_sg_to_dma(host, data);
759
760 /*
761 * Initialise the ISA DMA controller.
762 */
763 dmaflags = claim_dma_lock();
764 disable_dma(host->dma);
765 clear_dma_ff(host->dma);
766 if (data->flags & MMC_DATA_READ)
767 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
768 else
769 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
770 set_dma_addr(host->dma, host->dma_addr);
771 set_dma_count(host->dma, host->size);
772
773 enable_dma(host->dma);
774 release_dma_lock(dmaflags);
775
776 /*
777 * Enable DMA on the host.
778 */
779 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
780 }
781 else
782 {
783 /*
784 * This flag is used to keep printk
785 * output to a minimum.
786 */
787 host->firsterr = 1;
788
789 /*
790 * Initialise the SG list.
791 */
792 wbsd_init_sg(host, data);
793
794 /*
795 * Turn off DMA.
796 */
797 wbsd_write_index(host, WBSD_IDX_DMA, 0);
798
799 /*
800 * Set up FIFO threshold levels (and fill
801 * buffer if doing a write).
802 */
803 if (data->flags & MMC_DATA_READ)
804 {
805 wbsd_write_index(host, WBSD_IDX_FIFOEN,
806 WBSD_FIFOEN_FULL | 8);
807 }
808 else
809 {
810 wbsd_write_index(host, WBSD_IDX_FIFOEN,
811 WBSD_FIFOEN_EMPTY | 8);
812 wbsd_fill_fifo(host);
813 }
814 }
815
816 data->error = MMC_ERR_NONE;
817 }
818
819 static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
820 {
821 unsigned long dmaflags;
822 int count;
823 u8 status;
824
825 WARN_ON(host->mrq == NULL);
826
827 /*
828 * Send a stop command if needed.
829 */
830 if (data->stop)
831 wbsd_send_command(host, data->stop);
832
833 /*
834 * Wait for the controller to leave data
835 * transfer state.
836 */
837 do
838 {
839 status = wbsd_read_index(host, WBSD_IDX_STATUS);
840 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
841
842 /*
843 * DMA transfer?
844 */
845 if (host->dma >= 0)
846 {
847 /*
848 * Disable DMA on the host.
849 */
850 wbsd_write_index(host, WBSD_IDX_DMA, 0);
851
852 /*
853  * Turn off the ISA DMA controller.
854 */
855 dmaflags = claim_dma_lock();
856 disable_dma(host->dma);
857 clear_dma_ff(host->dma);
858 count = get_dma_residue(host->dma);
859 release_dma_lock(dmaflags);
860
861 /*
862 * Any leftover data?
863 */
864 if (count)
865 {
866 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
867 "transfer. %d bytes left.\n", count);
868
869 data->error = MMC_ERR_FAILED;
870 }
871 else
872 {
873 /*
874 * Transfer data from DMA buffer to
875 * SG list.
876 */
877 if (data->flags & MMC_DATA_READ)
878 wbsd_dma_to_sg(host, data);
879
880 data->bytes_xfered = host->size;
881 }
882 }
883
884 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
885
886 wbsd_request_end(host, host->mrq);
887 }
888
889 /*****************************************************************************\
890 * *
891 * MMC layer callbacks *
892 * *
893 \*****************************************************************************/
894
895 static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
896 {
897 struct wbsd_host* host = mmc_priv(mmc);
898 struct mmc_command* cmd;
899
900 /*
901 * Disable tasklets to avoid a deadlock.
902 */
903 spin_lock_bh(&host->lock);
904
905 BUG_ON(host->mrq != NULL);
906
907 cmd = mrq->cmd;
908
909 host->mrq = mrq;
910
911 /*
912 * If there is no card in the slot then
913  * time out immediately.
914 */
915 if (!(host->flags & WBSD_FCARD_PRESENT))
916 {
917 cmd->error = MMC_ERR_TIMEOUT;
918 goto done;
919 }
920
921 /*
922 * Does the request include data?
923 */
924 if (cmd->data)
925 {
926 wbsd_prepare_data(host, cmd->data);
927
928 if (cmd->data->error != MMC_ERR_NONE)
929 goto done;
930 }
931
932 wbsd_send_command(host, cmd);
933
934 /*
935  * If this is a data transfer, the request
936  * will be finished after the data has been
937  * transferred.
938 */
939 if (cmd->data && (cmd->error == MMC_ERR_NONE))
940 {
941 /*
942 * Dirty fix for hardware bug.
943 */
944 if (host->dma == -1)
945 tasklet_schedule(&host->fifo_tasklet);
946
947 spin_unlock_bh(&host->lock);
948
949 return;
950 }
951
952 done:
953 wbsd_request_end(host, mrq);
954
955 spin_unlock_bh(&host->lock);
956 }
957
958 static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
959 {
960 struct wbsd_host* host = mmc_priv(mmc);
961 u8 clk, setup, pwr;
962
963 DBGF("clock %uHz busmode %u powermode %u Vdd %u\n",
964 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
965
966 spin_lock_bh(&host->lock);
967
968 /*
969 * Reset the chip on each power off.
970 * Should clear out any weird states.
971 */
972 if (ios->power_mode == MMC_POWER_OFF)
973 wbsd_init_device(host);
974
975 if (ios->clock >= 24000000)
976 clk = WBSD_CLK_24M;
977 else if (ios->clock >= 16000000)
978 clk = WBSD_CLK_16M;
979 else if (ios->clock >= 12000000)
980 clk = WBSD_CLK_12M;
981 else
982 clk = WBSD_CLK_375K;
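	/*
	 * Note: the requested frequency is rounded down to the nearest rate
	 * the chip supports (24, 16 or 12 MHz); anything below 12 MHz falls
	 * back to the 375 kHz identification clock.
	 */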
983
984 /*
985 * Only write to the clock register when
986 * there is an actual change.
987 */
988 if (clk != host->clk)
989 {
990 wbsd_write_index(host, WBSD_IDX_CLK, clk);
991 host->clk = clk;
992 }
993
994 /*
995 * Power up card.
996 */
997 if (ios->power_mode != MMC_POWER_OFF)
998 {
999 pwr = inb(host->base + WBSD_CSR);
1000 pwr &= ~WBSD_POWER_N;
1001 outb(pwr, host->base + WBSD_CSR);
1002 }
1003
1004 /*
1005 * MMC cards need to have pin 1 high during init.
1006 * Init time corresponds rather nicely with the bus mode.
1007 * It wreaks havoc with the card detection though so
1008  * that needs to be disabled.
1009 */
1010 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1011 if ((ios->power_mode == MMC_POWER_ON) &&
1012 (ios->bus_mode == MMC_BUSMODE_OPENDRAIN))
1013 {
1014 setup |= WBSD_DAT3_H;
1015 host->flags |= WBSD_FIGNORE_DETECT;
1016 }
1017 else
1018 {
1019 setup &= ~WBSD_DAT3_H;
1020 host->flags &= ~WBSD_FIGNORE_DETECT;
1021 }
1022 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1023
1024 spin_unlock_bh(&host->lock);
1025 }
1026
1027 static struct mmc_host_ops wbsd_ops = {
1028 .request = wbsd_request,
1029 .set_ios = wbsd_set_ios,
1030 };
1031
1032 /*****************************************************************************\
1033 * *
1034 * Interrupt handling *
1035 * *
1036 \*****************************************************************************/
1037
1038 /*
1039 * Helper function for card detection
1040 */
1041 static void wbsd_detect_card(unsigned long data)
1042 {
1043 struct wbsd_host *host = (struct wbsd_host*)data;
1044
1045 BUG_ON(host == NULL);
1046
1047 DBG("Executing card detection\n");
1048
1049 mmc_detect_change(host->mmc);
1050 }
1051
1052 /*
1053 * Tasklets
1054 */
1055
1056 static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
1057 {
1058 WARN_ON(!host->mrq);
1059 if (!host->mrq)
1060 return NULL;
1061
1062 WARN_ON(!host->mrq->cmd);
1063 if (!host->mrq->cmd)
1064 return NULL;
1065
1066 WARN_ON(!host->mrq->cmd->data);
1067 if (!host->mrq->cmd->data)
1068 return NULL;
1069
1070 return host->mrq->cmd->data;
1071 }
1072
1073 static void wbsd_tasklet_card(unsigned long param)
1074 {
1075 struct wbsd_host* host = (struct wbsd_host*)param;
1076 u8 csr;
1077
1078 spin_lock(&host->lock);
1079
1080 if (host->flags & WBSD_FIGNORE_DETECT)
1081 {
1082 spin_unlock(&host->lock);
1083 return;
1084 }
1085
1086 csr = inb(host->base + WBSD_CSR);
1087 WARN_ON(csr == 0xff);
1088
1089 if (csr & WBSD_CARDPRESENT)
1090 {
1091 if (!(host->flags & WBSD_FCARD_PRESENT))
1092 {
1093 DBG("Card inserted\n");
1094 host->flags |= WBSD_FCARD_PRESENT;
1095
1096 /*
1097 * Delay card detection to allow electrical connections
1098 * to stabilise.
1099 */
1100 mod_timer(&host->timer, jiffies + HZ/2);
1101 }
1102
1103 spin_unlock(&host->lock);
1104 }
1105 else if (host->flags & WBSD_FCARD_PRESENT)
1106 {
1107 DBG("Card removed\n");
1108 host->flags &= ~WBSD_FCARD_PRESENT;
1109
1110 if (host->mrq)
1111 {
1112 printk(KERN_ERR DRIVER_NAME
1113 ": Card removed during transfer!\n");
1114 wbsd_reset(host);
1115
1116 host->mrq->cmd->error = MMC_ERR_FAILED;
1117 tasklet_schedule(&host->finish_tasklet);
1118 }
1119
1120 /*
1121 * Unlock first since we might get a call back.
1122 */
1123 spin_unlock(&host->lock);
1124
1125 mmc_detect_change(host->mmc);
1126 }
1127 }
1128
1129 static void wbsd_tasklet_fifo(unsigned long param)
1130 {
1131 struct wbsd_host* host = (struct wbsd_host*)param;
1132 struct mmc_data* data;
1133
1134 spin_lock(&host->lock);
1135
1136 if (!host->mrq)
1137 goto end;
1138
1139 data = wbsd_get_data(host);
1140 if (!data)
1141 goto end;
1142
1143 if (data->flags & MMC_DATA_WRITE)
1144 wbsd_fill_fifo(host);
1145 else
1146 wbsd_empty_fifo(host);
1147
1148 /*
1149 * Done?
1150 */
1151 if (host->size == data->bytes_xfered)
1152 {
1153 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1154 tasklet_schedule(&host->finish_tasklet);
1155 }
1156
1157 end:
1158 spin_unlock(&host->lock);
1159 }
1160
1161 static void wbsd_tasklet_crc(unsigned long param)
1162 {
1163 struct wbsd_host* host = (struct wbsd_host*)param;
1164 struct mmc_data* data;
1165
1166 spin_lock(&host->lock);
1167
1168 if (!host->mrq)
1169 goto end;
1170
1171 data = wbsd_get_data(host);
1172 if (!data)
1173 goto end;
1174
1175 DBGF("CRC error\n");
1176
1177 data->error = MMC_ERR_BADCRC;
1178
1179 tasklet_schedule(&host->finish_tasklet);
1180
1181 end:
1182 spin_unlock(&host->lock);
1183 }
1184
1185 static void wbsd_tasklet_timeout(unsigned long param)
1186 {
1187 struct wbsd_host* host = (struct wbsd_host*)param;
1188 struct mmc_data* data;
1189
1190 spin_lock(&host->lock);
1191
1192 if (!host->mrq)
1193 goto end;
1194
1195 data = wbsd_get_data(host);
1196 if (!data)
1197 goto end;
1198
1199 DBGF("Timeout\n");
1200
1201 data->error = MMC_ERR_TIMEOUT;
1202
1203 tasklet_schedule(&host->finish_tasklet);
1204
1205 end:
1206 spin_unlock(&host->lock);
1207 }
1208
1209 static void wbsd_tasklet_finish(unsigned long param)
1210 {
1211 struct wbsd_host* host = (struct wbsd_host*)param;
1212 struct mmc_data* data;
1213
1214 spin_lock(&host->lock);
1215
1216 WARN_ON(!host->mrq);
1217 if (!host->mrq)
1218 goto end;
1219
1220 data = wbsd_get_data(host);
1221 if (!data)
1222 goto end;
1223
1224 wbsd_finish_data(host, data);
1225
1226 end:
1227 spin_unlock(&host->lock);
1228 }
1229
1230 static void wbsd_tasklet_block(unsigned long param)
1231 {
1232 struct wbsd_host* host = (struct wbsd_host*)param;
1233 struct mmc_data* data;
1234
1235 spin_lock(&host->lock);
1236
1237 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1238 WBSD_CRC_OK)
1239 {
1240 data = wbsd_get_data(host);
1241 if (!data)
1242 goto end;
1243
1244 DBGF("CRC error\n");
1245
1246 data->error = MMC_ERR_BADCRC;
1247
1248 tasklet_schedule(&host->finish_tasklet);
1249 }
1250
1251 end:
1252 spin_unlock(&host->lock);
1253 }
1254
1255 /*
1256 * Interrupt handling
1257 */
1258
1259 static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1260 {
1261 struct wbsd_host* host = dev_id;
1262 int isr;
1263
1264 isr = inb(host->base + WBSD_ISR);
1265
1266 /*
1267 * Was it actually our hardware that caused the interrupt?
1268 */
1269 if (isr == 0xff || isr == 0x00)
1270 return IRQ_NONE;
1271
1272 host->isr |= isr;
1273
1274 /*
1275 * Schedule tasklets as needed.
1276 */
1277 if (isr & WBSD_INT_CARD)
1278 tasklet_schedule(&host->card_tasklet);
1279 if (isr & WBSD_INT_FIFO_THRE)
1280 tasklet_schedule(&host->fifo_tasklet);
1281 if (isr & WBSD_INT_CRC)
1282 tasklet_hi_schedule(&host->crc_tasklet);
1283 if (isr & WBSD_INT_TIMEOUT)
1284 tasklet_hi_schedule(&host->timeout_tasklet);
1285 if (isr & WBSD_INT_BUSYEND)
1286 tasklet_hi_schedule(&host->block_tasklet);
1287 if (isr & WBSD_INT_TC)
1288 tasklet_schedule(&host->finish_tasklet);
1289
1290 return IRQ_HANDLED;
1291 }
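/*
 * The handler above only latches the ISR bits and schedules tasklets; all
 * real work (FIFO draining, CRC/timeout handling, request completion) runs
 * later in softirq context, keeping the time spent in hard interrupt
 * context to a minimum.
 */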
1292
1293 /*****************************************************************************\
1294 * *
1295 * Device initialisation and shutdown *
1296 * *
1297 \*****************************************************************************/
1298
1299 /*
1300 * Allocate/free MMC structure.
1301 */
1302
1303 static int __devinit wbsd_alloc_mmc(struct device* dev)
1304 {
1305 struct mmc_host* mmc;
1306 struct wbsd_host* host;
1307
1308 /*
1309 * Allocate MMC structure.
1310 */
1311 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1312 if (!mmc)
1313 return -ENOMEM;
1314
1315 host = mmc_priv(mmc);
1316 host->mmc = mmc;
1317
1318 host->dma = -1;
1319
1320 /*
1321 * Set host parameters.
1322 */
1323 mmc->ops = &wbsd_ops;
1324 mmc->f_min = 375000;
1325 mmc->f_max = 24000000;
1326 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1327
1328 spin_lock_init(&host->lock);
1329
1330 /*
1331 * Set up detection timer
1332 */
1333 init_timer(&host->timer);
1334 host->timer.data = (unsigned long)host;
1335 host->timer.function = wbsd_detect_card;
1336
1337 /*
1338 * Maximum number of segments. Worst case is one sector per segment
1339 * so this will be 64kB/512.
1340 */
1341 mmc->max_hw_segs = 128;
1342 mmc->max_phys_segs = 128;
1343
1344 /*
1345 * Maximum number of sectors in one transfer. Also limited by 64kB
1346 * buffer.
1347 */
1348 mmc->max_sectors = 128;
1349
1350 /*
1351 * Maximum segment size. Could be one segment with the maximum number
1352 * of segments.
1353 */
1354 mmc->max_seg_size = mmc->max_sectors * 512;
1355
1356 dev_set_drvdata(dev, mmc);
1357
1358 return 0;
1359 }
1360
1361 static void __devexit wbsd_free_mmc(struct device* dev)
1362 {
1363 struct mmc_host* mmc;
1364 struct wbsd_host* host;
1365
1366 mmc = dev_get_drvdata(dev);
1367 if (!mmc)
1368 return;
1369
1370 host = mmc_priv(mmc);
1371 BUG_ON(host == NULL);
1372
1373 del_timer_sync(&host->timer);
1374
1375 mmc_free_host(mmc);
1376
1377 dev_set_drvdata(dev, NULL);
1378 }
1379
1380 /*
1381  * Scan for known chip IDs
1382 */
1383
1384 static int __devinit wbsd_scan(struct wbsd_host* host)
1385 {
1386 int i, j, k;
1387 int id;
1388
1389 /*
1390 * Iterate through all ports, all codes to
1391 * find hardware that is in our known list.
1392 */
1393 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
1394 {
1395 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1396 continue;
1397
1398 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
1399 {
1400 id = 0xFFFF;
1401
1402 outb(unlock_codes[j], config_ports[i]);
1403 outb(unlock_codes[j], config_ports[i]);
1404
1405 outb(WBSD_CONF_ID_HI, config_ports[i]);
1406 id = inb(config_ports[i] + 1) << 8;
1407
1408 outb(WBSD_CONF_ID_LO, config_ports[i]);
1409 id |= inb(config_ports[i] + 1);
1410
1411 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
1412 {
1413 if (id == valid_ids[k])
1414 {
1415 host->chip_id = id;
1416 host->config = config_ports[i];
1417 host->unlock_code = unlock_codes[j];
1418
1419 return 0;
1420 }
1421 }
1422
1423 if (id != 0xFFFF)
1424 {
1425 DBG("Unknown hardware (id %x) found at %x\n",
1426 id, config_ports[i]);
1427 }
1428
1429 outb(LOCK_CODE, config_ports[i]);
1430 }
1431
1432 release_region(config_ports[i], 2);
1433 }
1434
1435 return -ENODEV;
1436 }
1437
1438 /*
1439 * Allocate/free io port ranges
1440 */
1441
1442 static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
1443 {
1444 if (base & 0x7)
1445 return -EINVAL;
1446
1447 if (!request_region(base, 8, DRIVER_NAME))
1448 return -EIO;
1449
1450 host->base = base;
1451
1452 return 0;
1453 }
1454
1455 static void __devexit wbsd_release_regions(struct wbsd_host* host)
1456 {
1457 if (host->base)
1458 release_region(host->base, 8);
1459
1460 host->base = 0;
1461
1462 if (host->config)
1463 release_region(host->config, 2);
1464
1465 host->config = 0;
1466 }
1467
1468 /*
1469 * Allocate/free DMA port and buffer
1470 */
1471
1472 static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma)
1473 {
1474 if (dma < 0)
1475 return;
1476
1477 if (request_dma(dma, DRIVER_NAME))
1478 goto err;
1479
1480 /*
1481 * We need to allocate a special buffer in
1482 * order for ISA to be able to DMA to it.
1483 */
1484 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1485 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1486 if (!host->dma_buffer)
1487 goto free;
1488
1489 /*
1490 * Translate the address to a physical address.
1491 */
1492 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1493 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1494
1495 /*
1496  * The ISA DMA buffer must be aligned to a 64 kB boundary.
1497 */
1498 if ((host->dma_addr & 0xffff) != 0)
1499 goto kfree;
1500 /*
1501 * ISA cannot access memory above 16 MB.
1502 */
1503 else if (host->dma_addr >= 0x1000000)
1504 goto kfree;
1505
1506 host->dma = dma;
1507
1508 return;
1509
1510 kfree:
1511 /*
1512 * If we've gotten here then there is some kind of alignment bug
1513 */
1514 BUG_ON(1);
1515
1516 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1517 DMA_BIDIRECTIONAL);
1518 host->dma_addr = (dma_addr_t)NULL;
1519
1520 kfree(host->dma_buffer);
1521 host->dma_buffer = NULL;
1522
1523 free:
1524 free_dma(dma);
1525
1526 err:
1527 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1528 "Falling back on FIFO.\n", dma);
1529 }
1530
1531 static void __devexit wbsd_release_dma(struct wbsd_host* host)
1532 {
1533 if (host->dma_addr)
1534 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1535 DMA_BIDIRECTIONAL);
1536 if (host->dma_buffer)
1537 kfree(host->dma_buffer);
1538 if (host->dma >= 0)
1539 free_dma(host->dma);
1540
1541 host->dma = -1;
1542 host->dma_buffer = NULL;
1543 host->dma_addr = (dma_addr_t)NULL;
1544 }
1545
1546 /*
1547 * Allocate/free IRQ.
1548 */
1549
1550 static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
1551 {
1552 int ret;
1553
1554 /*
1555 * Allocate interrupt.
1556 */
1557
1558 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1559 if (ret)
1560 return ret;
1561
1562 host->irq = irq;
1563
1564 /*
1565 * Set up tasklets.
1566 */
1567 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
1568 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
1569 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
1570 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
1571 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
1572 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
1573
1574 return 0;
1575 }
1576
1577 static void __devexit wbsd_release_irq(struct wbsd_host* host)
1578 {
1579 if (!host->irq)
1580 return;
1581
1582 free_irq(host->irq, host);
1583
1584 host->irq = 0;
1585
1586 tasklet_kill(&host->card_tasklet);
1587 tasklet_kill(&host->fifo_tasklet);
1588 tasklet_kill(&host->crc_tasklet);
1589 tasklet_kill(&host->timeout_tasklet);
1590 tasklet_kill(&host->finish_tasklet);
1591 tasklet_kill(&host->block_tasklet);
1592 }
1593
1594 /*
1595 * Allocate all resources for the host.
1596 */
1597
1598 static int __devinit wbsd_request_resources(struct wbsd_host* host,
1599 int base, int irq, int dma)
1600 {
1601 int ret;
1602
1603 /*
1604 * Allocate I/O ports.
1605 */
1606 ret = wbsd_request_region(host, base);
1607 if (ret)
1608 return ret;
1609
1610 /*
1611 * Allocate interrupt.
1612 */
1613 ret = wbsd_request_irq(host, irq);
1614 if (ret)
1615 return ret;
1616
1617 /*
1618 * Allocate DMA.
1619 */
1620 wbsd_request_dma(host, dma);
1621
1622 return 0;
1623 }
1624
1625 /*
1626 * Release all resources for the host.
1627 */
1628
1629 static void __devexit wbsd_release_resources(struct wbsd_host* host)
1630 {
1631 wbsd_release_dma(host);
1632 wbsd_release_irq(host);
1633 wbsd_release_regions(host);
1634 }
1635
1636 /*
1637 * Configure the resources the chip should use.
1638 */
1639
1640 static void __devinit wbsd_chip_config(struct wbsd_host* host)
1641 {
1642 /*
1643 * Reset the chip.
1644 */
1645 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1646 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1647
1648 /*
1649 * Select SD/MMC function.
1650 */
1651 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1652
1653 /*
1654 * Set up card detection.
1655 */
1656 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1657
1658 /*
1659 * Configure chip
1660 */
1661 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1662 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1663
1664 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1665
1666 if (host->dma >= 0)
1667 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1668
1669 /*
1670 * Enable and power up chip.
1671 */
1672 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1673 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1674 }
1675
1676 /*
1677 * Check that configured resources are correct.
1678 */
1679
1680 static int __devinit wbsd_chip_validate(struct wbsd_host* host)
1681 {
1682 int base, irq, dma;
1683
1684 /*
1685 * Select SD/MMC function.
1686 */
1687 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1688
1689 /*
1690 * Read configuration.
1691 */
1692 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1693 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1694
1695 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1696
1697 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1698
1699 /*
1700 * Validate against given configuration.
1701 */
1702 if (base != host->base)
1703 return 0;
1704 if (irq != host->irq)
1705 return 0;
1706 if ((dma != host->dma) && (host->dma != -1))
1707 return 0;
1708
1709 return 1;
1710 }
1711
1712 /*****************************************************************************\
1713 * *
1714 * Devices setup and shutdown *
1715 * *
1716 \*****************************************************************************/
1717
1718 static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
1719 int pnp)
1720 {
1721 struct wbsd_host* host = NULL;
1722 struct mmc_host* mmc = NULL;
1723 int ret;
1724
1725 ret = wbsd_alloc_mmc(dev);
1726 if (ret)
1727 return ret;
1728
1729 mmc = dev_get_drvdata(dev);
1730 host = mmc_priv(mmc);
1731
1732 /*
1733 * Scan for hardware.
1734 */
1735 ret = wbsd_scan(host);
1736 if (ret)
1737 {
1738 if (pnp && (ret == -ENODEV))
1739 {
1740 printk(KERN_WARNING DRIVER_NAME
1741 ": Unable to confirm device presence. You may "
1742 "experience lock-ups.\n");
1743 }
1744 else
1745 {
1746 wbsd_free_mmc(dev);
1747 return ret;
1748 }
1749 }
1750
1751 /*
1752 * Request resources.
1753 */
1754 ret = wbsd_request_resources(host, base, irq, dma);
1755 if (ret)
1756 {
1757 wbsd_release_resources(host);
1758 wbsd_free_mmc(dev);
1759 return ret;
1760 }
1761
1762 /*
1763 * See if chip needs to be configured.
1764 */
1765 if (pnp && (host->config != 0))
1766 {
1767 if (!wbsd_chip_validate(host))
1768 {
1769 printk(KERN_WARNING DRIVER_NAME
1770 ": PnP active but chip not configured! "
1771 "You probably have a buggy BIOS. "
1772 "Configuring chip manually.\n");
1773 wbsd_chip_config(host);
1774 }
1775 }
1776 else
1777 wbsd_chip_config(host);
1778
1779 /*
1780 * Power Management stuff. No idea how this works.
1781 * Not tested.
1782 */
1783 #ifdef CONFIG_PM
1784 if (host->config)
1785 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1786 #endif
1787 /*
1788 * Allow device to initialise itself properly.
1789 */
1790 mdelay(5);
1791
1792 /*
1793 * Reset the chip into a known state.
1794 */
1795 wbsd_init_device(host);
1796
1797 mmc_add_host(mmc);
1798
1799 printk(KERN_INFO "%s: W83L51xD", mmc->host_name);
1800 if (host->chip_id != 0)
1801 printk(" id %x", (int)host->chip_id);
1802 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1803 if (host->dma >= 0)
1804 printk(" dma %d", (int)host->dma);
1805 else
1806 printk(" FIFO");
1807 if (pnp)
1808 printk(" PnP");
1809 printk("\n");
1810
1811 return 0;
1812 }
1813
1814 static void __devexit wbsd_shutdown(struct device* dev, int pnp)
1815 {
1816 struct mmc_host* mmc = dev_get_drvdata(dev);
1817 struct wbsd_host* host;
1818
1819 if (!mmc)
1820 return;
1821
1822 host = mmc_priv(mmc);
1823
1824 mmc_remove_host(mmc);
1825
1826 if (!pnp)
1827 {
1828 /*
1829 * Power down the SD/MMC function.
1830 */
1831 wbsd_unlock_config(host);
1832 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1833 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1834 wbsd_lock_config(host);
1835 }
1836
1837 wbsd_release_resources(host);
1838
1839 wbsd_free_mmc(dev);
1840 }
1841
1842 /*
1843 * Non-PnP
1844 */
1845
1846 static int __devinit wbsd_probe(struct device* dev)
1847 {
1848 return wbsd_init(dev, io, irq, dma, 0);
1849 }
1850
1851 static int __devexit wbsd_remove(struct device* dev)
1852 {
1853 wbsd_shutdown(dev, 0);
1854
1855 return 0;
1856 }
1857
1858 /*
1859 * PnP
1860 */
1861
1862 #ifdef CONFIG_PNP
1863
1864 static int __devinit
1865 wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
1866 {
1867 int io, irq, dma;
1868
1869 /*
1870 * Get resources from PnP layer.
1871 */
1872 io = pnp_port_start(pnpdev, 0);
1873 irq = pnp_irq(pnpdev, 0);
1874 if (pnp_dma_valid(pnpdev, 0))
1875 dma = pnp_dma(pnpdev, 0);
1876 else
1877 dma = -1;
1878
1879 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1880
1881 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1882 }
1883
1884 static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
1885 {
1886 wbsd_shutdown(&dev->dev, 1);
1887 }
1888
1889 #endif /* CONFIG_PNP */
1890
1891 /*
1892 * Power management
1893 */
1894
1895 #ifdef CONFIG_PM
1896 static int wbsd_suspend(struct device *dev, pm_message_t state, u32 level)
1897 {
1898 DBGF("Not yet supported\n");
1899
1900 return 0;
1901 }
1902
1903 static int wbsd_resume(struct device *dev, u32 level)
1904 {
1905 DBGF("Not yet supported\n");
1906
1907 return 0;
1908 }
1909 #else
1910 #define wbsd_suspend NULL
1911 #define wbsd_resume NULL
1912 #endif
1913
1914 static struct platform_device *wbsd_device;
1915
1916 static struct device_driver wbsd_driver = {
1917 .name = DRIVER_NAME,
1918 .bus = &platform_bus_type,
1919 .probe = wbsd_probe,
1920 .remove = wbsd_remove,
1921
1922 .suspend = wbsd_suspend,
1923 .resume = wbsd_resume,
1924 };
1925
1926 #ifdef CONFIG_PNP
1927
1928 static struct pnp_driver wbsd_pnp_driver = {
1929 .name = DRIVER_NAME,
1930 .id_table = pnp_dev_table,
1931 .probe = wbsd_pnp_probe,
1932 .remove = wbsd_pnp_remove,
1933 };
1934
1935 #endif /* CONFIG_PNP */
1936
1937 /*
1938 * Module loading/unloading
1939 */
1940
1941 static int __init wbsd_drv_init(void)
1942 {
1943 int result;
1944
1945 printk(KERN_INFO DRIVER_NAME
1946 ": Winbond W83L51xD SD/MMC card interface driver, "
1947 DRIVER_VERSION "\n");
1948 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1949
1950 #ifdef CONFIG_PNP
1951
1952 if (!nopnp)
1953 {
1954 result = pnp_register_driver(&wbsd_pnp_driver);
1955 if (result < 0)
1956 return result;
1957 }
1958
1959 #endif /* CONFIG_PNP */
1960
1961 if (nopnp)
1962 {
1963 result = driver_register(&wbsd_driver);
1964 if (result < 0)
1965 return result;
1966
1967 wbsd_device = platform_device_register_simple(DRIVER_NAME, -1,
1968 NULL, 0);
1969 if (IS_ERR(wbsd_device))
1970 return PTR_ERR(wbsd_device);
1971 }
1972
1973 return 0;
1974 }
1975
1976 static void __exit wbsd_drv_exit(void)
1977 {
1978 #ifdef CONFIG_PNP
1979
1980 if (!nopnp)
1981 pnp_unregister_driver(&wbsd_pnp_driver);
1982
1983 #endif /* CONFIG_PNP */
1984
1985 if (nopnp)
1986 {
1987 platform_device_unregister(wbsd_device);
1988
1989 driver_unregister(&wbsd_driver);
1990 }
1991
1992 DBG("unloaded\n");
1993 }
1994
1995 module_init(wbsd_drv_init);
1996 module_exit(wbsd_drv_exit);
1997 #ifdef CONFIG_PNP
1998 module_param(nopnp, uint, 0444);
1999 #endif
2000 module_param(io, uint, 0444);
2001 module_param(irq, uint, 0444);
2002 module_param(dma, int, 0444);
2003
2004 MODULE_LICENSE("GPL");
2005 MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2006 MODULE_VERSION(DRIVER_VERSION);
2007
2008 #ifdef CONFIG_PNP
2009 MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2010 #endif
2011 MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2012 MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2013 MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");