[PATCH] MMC: wbsd delayed insertion
drivers/mmc/wbsd.c
1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24#include <linux/config.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/device.h>
30#include <linux/interrupt.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/pnp.h>
34#include <linux/highmem.h>
35#include <linux/mmc/host.h>
36#include <linux/mmc/protocol.h>
37
38#include <asm/io.h>
39#include <asm/dma.h>
40#include <asm/scatterlist.h>
41
42#include "wbsd.h"
43
44#define DRIVER_NAME "wbsd"
45#define DRIVER_VERSION "1.2"
46
47#ifdef CONFIG_MMC_DEBUG
48#define DBG(x...) \
49 printk(KERN_DEBUG DRIVER_NAME ": " x)
50#define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
52#else
53#define DBG(x...) do { } while (0)
54#define DBGF(x...) do { } while (0)
55#endif
56
57#ifdef CONFIG_MMC_DEBUG
58void DBG_REG(int reg, u8 value)
59{
60 int i;
61
62 printk(KERN_DEBUG "wbsd: Register %d: 0x%02X %3d '%c' ",
63 reg, (int)value, (int)value, (value < 0x20)?'.':value);
64
65 for (i = 7;i >= 0;i--)
66 {
67 if (value & (1 << i))
68 printk("x");
69 else
70 printk(".");
71 }
72
73 printk("\n");
74}
75#else
76#define DBG_REG(r, v) do {} while (0)
77#endif
78
79/*
80 * Device resources
81 */
82
83#ifdef CONFIG_PNP
84
85static const struct pnp_device_id pnp_dev_table[] = {
86 { "WEC0517", 0 },
87 { "WEC0518", 0 },
88 { "", 0 },
89};
90
91MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
92
93#endif /* CONFIG_PNP */
94
95#ifdef CONFIG_PNP
96static unsigned int nopnp = 0;
97#else
98static const unsigned int nopnp = 1;
99#endif
100static unsigned int io = 0x248;
101static unsigned int irq = 6;
102static int dma = 2;
103
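/*
 * Usage example (added; not part of the original source): with PnP disabled
 * the driver can be loaded with explicit resources matching the defaults
 * above, e.g.:
 *
 *   modprobe wbsd nopnp=1 io=0x248 irq=6 dma=2
 *
 * Passing dma=-1 disables DMA so the driver falls back on FIFO transfers.
 */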
104/*
105 * Basic functions
106 */
107
108static inline void wbsd_unlock_config(struct wbsd_host* host)
109{
110 BUG_ON(host->config == 0);
111
112 outb(host->unlock_code, host->config);
113 outb(host->unlock_code, host->config);
114}
115
116static inline void wbsd_lock_config(struct wbsd_host* host)
117{
118 BUG_ON(host->config == 0);
119
120 outb(LOCK_CODE, host->config);
121}
122
123static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
124{
125 BUG_ON(host->config == 0);
126
127 outb(reg, host->config);
128 outb(value, host->config + 1);
129}
130
131static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
132{
133 BUG_ON(host->config == 0);
134
135 outb(reg, host->config);
136 return inb(host->config + 1);
137}
138
139static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
140{
141 outb(index, host->base + WBSD_IDXR);
142 outb(value, host->base + WBSD_DATAR);
143}
144
145static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
146{
147 outb(index, host->base + WBSD_IDXR);
148 return inb(host->base + WBSD_DATAR);
149}
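/*
 * Added note: the SD/MMC block exposes most of its registers indirectly --
 * an index is written to WBSD_IDXR and the value is then transferred
 * through WBSD_DATAR, which is why wbsd_read_index()/wbsd_write_index()
 * above are two-step outb()/inb() sequences.
 */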
150
151/*
152 * Common routines
153 */
154
155static void wbsd_init_device(struct wbsd_host* host)
156{
157 u8 setup, ier;
158
159 /*
160 * Reset chip (SD/MMC part) and fifo.
161 */
162 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
163 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
164 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
165
166 /*
167 * Set DAT3 to input
168 */
169 setup &= ~WBSD_DAT3_H;
170 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
171 host->flags &= ~WBSD_FIGNORE_DETECT;
172
173 /*
174 * Read back default clock.
175 */
176 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
177
178 /*
179 * Power down port.
180 */
181 outb(WBSD_POWER_N, host->base + WBSD_CSR);
182
183 /*
184 * Set maximum timeout.
185 */
186 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
187
188 /*
189 * Test for card presence
190 */
191 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
192 host->flags |= WBSD_FCARD_PRESENT;
193 else
194 host->flags &= ~WBSD_FCARD_PRESENT;
195
196 /*
197 * Enable interesting interrupts.
198 */
199 ier = 0;
200 ier |= WBSD_EINT_CARD;
201 ier |= WBSD_EINT_FIFO_THRE;
202 ier |= WBSD_EINT_CCRC;
203 ier |= WBSD_EINT_TIMEOUT;
204 ier |= WBSD_EINT_CRC;
205 ier |= WBSD_EINT_TC;
206
207 outb(ier, host->base + WBSD_EIR);
208
209 /*
210 * Clear interrupts.
211 */
212 inb(host->base + WBSD_ISR);
213}
214
215static void wbsd_reset(struct wbsd_host* host)
216{
217 u8 setup;
218
219 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
220
221 /*
222 * Soft reset of chip (SD/MMC part).
223 */
224 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
225 setup |= WBSD_SOFT_RESET;
226 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
227}
228
229static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
230{
231 unsigned long dmaflags;
232
233 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
234
235 if (host->dma >= 0)
236 {
237 /*
238 * Release ISA DMA controller.
239 */
240 dmaflags = claim_dma_lock();
241 disable_dma(host->dma);
242 clear_dma_ff(host->dma);
243 release_dma_lock(dmaflags);
244
245 /*
246 * Disable DMA on host.
247 */
248 wbsd_write_index(host, WBSD_IDX_DMA, 0);
249 }
250
251 host->mrq = NULL;
252
253 /*
254 * MMC layer might call back into the driver so first unlock.
255 */
256 spin_unlock(&host->lock);
257 mmc_request_done(host->mmc, mrq);
258 spin_lock(&host->lock);
259}
260
261/*
262 * Scatter/gather functions
263 */
264
265static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
266{
267 /*
268 * Get info. about SG list from data structure.
269 */
270 host->cur_sg = data->sg;
271 host->num_sg = data->sg_len;
272
273 host->offset = 0;
274 host->remain = host->cur_sg->length;
275}
276
277static inline int wbsd_next_sg(struct wbsd_host* host)
278{
279 /*
280 * Skip to next SG entry.
281 */
282 host->cur_sg++;
283 host->num_sg--;
284
285 /*
286 * Any entries left?
287 */
288 if (host->num_sg > 0)
289 {
290 host->offset = 0;
291 host->remain = host->cur_sg->length;
292 }
293
294 return host->num_sg;
295}
296
297static inline char* wbsd_kmap_sg(struct wbsd_host* host)
298{
299 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
300 host->cur_sg->offset;
301 return host->mapped_sg;
302}
303
304static inline void wbsd_kunmap_sg(struct wbsd_host* host)
305{
306 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
307}
308
309static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
310{
311 unsigned int len, i, size;
312 struct scatterlist* sg;
313 char* dmabuf = host->dma_buffer;
314 char* sgbuf;
315
316 size = host->size;
317
318 sg = data->sg;
319 len = data->sg_len;
320
321 /*
322 * Just loop through all entries. Size might not
323 * be the entire list though so make sure that
324 * we do not transfer too much.
325 */
326 for (i = 0;i < len;i++)
327 {
328 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
329 if (size < sg[i].length)
330 memcpy(dmabuf, sgbuf, size);
331 else
332 memcpy(dmabuf, sgbuf, sg[i].length);
333 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
334 dmabuf += sg[i].length;
335
336 if (size < sg[i].length)
337 size = 0;
338 else
339 size -= sg[i].length;
340
341 if (size == 0)
342 break;
343 }
344
345 /*
346 * Check that we didn't get a request to transfer
347 * more data than can fit into the SG list.
348 */
349
350 BUG_ON(size != 0);
351
352 host->size -= size;
353}
354
355static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
356{
357 unsigned int len, i, size;
358 struct scatterlist* sg;
359 char* dmabuf = host->dma_buffer;
360 char* sgbuf;
361
362 size = host->size;
363
364 sg = data->sg;
365 len = data->sg_len;
366
367 /*
368 * Just loop through all entries. Size might not
369 * be the entire list though so make sure that
370 * we do not transfer too much.
371 */
372 for (i = 0;i < len;i++)
373 {
374 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
375 if (size < sg[i].length)
376 memcpy(sgbuf, dmabuf, size);
377 else
378 memcpy(sgbuf, dmabuf, sg[i].length);
379 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
380 dmabuf += sg[i].length;
381
382 if (size < sg[i].length)
383 size = 0;
384 else
385 size -= sg[i].length;
386
387 if (size == 0)
388 break;
389 }
390
391 /*
392 * Check that we didn't get a request to transfer
393 * more data than can fit into the SG list.
394 */
395
396 BUG_ON(size != 0);
397
398 host->size -= size;
399}
400
401/*
402 * Command handling
403 */
404
405static inline void wbsd_get_short_reply(struct wbsd_host* host,
406 struct mmc_command* cmd)
407{
408 /*
409 * Correct response type?
410 */
411 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
412 {
413 cmd->error = MMC_ERR_INVALID;
414 return;
415 }
416
417 cmd->resp[0] =
418 wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
419 cmd->resp[0] |=
420 wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
421 cmd->resp[0] |=
422 wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
423 cmd->resp[0] |=
424 wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
425 cmd->resp[1] =
426 wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
427}
428
429static inline void wbsd_get_long_reply(struct wbsd_host* host,
430 struct mmc_command* cmd)
431{
432 int i;
433
434 /*
435 * Correct response type?
436 */
437 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
438 {
439 cmd->error = MMC_ERR_INVALID;
440 return;
441 }
442
443 for (i = 0;i < 4;i++)
444 {
445 cmd->resp[i] =
446 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
447 cmd->resp[i] |=
448 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
449 cmd->resp[i] |=
450 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
451 cmd->resp[i] |=
452 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
453 }
454}
455
456static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
457{
458 int i;
459 u8 status, isr;
460
461 DBGF("Sending cmd (%x)\n", cmd->opcode);
462
463 /*
464 * Clear accumulated ISR. The interrupt routine
465 * will fill this one with events that occur during
466 * transfer.
467 */
468 host->isr = 0;
469
470 /*
471 * Send the command (CRC calculated by host).
472 */
473 outb(cmd->opcode, host->base + WBSD_CMDR);
474 for (i = 3;i >= 0;i--)
475 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
476
477 cmd->error = MMC_ERR_NONE;
478
479 /*
480 * Wait for the request to complete.
481 */
482 do {
483 status = wbsd_read_index(host, WBSD_IDX_STATUS);
484 } while (status & WBSD_CARDTRAFFIC);
485
486 /*
487 * Do we expect a reply?
488 */
489 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
490 {
491 /*
492 * Read back status.
493 */
494 isr = host->isr;
495
496 /* Card removed? */
497 if (isr & WBSD_INT_CARD)
498 cmd->error = MMC_ERR_TIMEOUT;
499 /* Timeout? */
500 else if (isr & WBSD_INT_TIMEOUT)
501 cmd->error = MMC_ERR_TIMEOUT;
502 /* CRC? */
503 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
504 cmd->error = MMC_ERR_BADCRC;
505 /* All ok */
506 else
507 {
508 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
509 wbsd_get_short_reply(host, cmd);
510 else
511 wbsd_get_long_reply(host, cmd);
512 }
513 }
514
515 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
516}
517
518/*
519 * Data functions
520 */
521
522static void wbsd_empty_fifo(struct wbsd_host* host)
523{
524 struct mmc_data* data = host->mrq->cmd->data;
525 char* buffer;
526 int i, fsr, fifo;
527
528 /*
529 * Handle excessive data.
530 */
531 if (data->bytes_xfered == host->size)
532 return;
533
534 buffer = wbsd_kmap_sg(host) + host->offset;
535
536 /*
537 * Drain the fifo. This has a tendency to loop longer
538 * than the FIFO length (usually one block).
539 */
540 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
541 {
542 /*
543 * The size field in the FSR is broken so we have to
544 * do some guessing.
545 */
546 if (fsr & WBSD_FIFO_FULL)
547 fifo = 16;
548 else if (fsr & WBSD_FIFO_FUTHRE)
549 fifo = 8;
550 else
551 fifo = 1;
552
553 for (i = 0;i < fifo;i++)
554 {
555 *buffer = inb(host->base + WBSD_DFR);
556 buffer++;
557 host->offset++;
558 host->remain--;
559
560 data->bytes_xfered++;
561
562 /*
563 * Transfer done?
564 */
565 if (data->bytes_xfered == host->size)
566 {
567 wbsd_kunmap_sg(host);
568 return;
569 }
570
571 /*
572 * End of scatter list entry?
573 */
574 if (host->remain == 0)
575 {
576 wbsd_kunmap_sg(host);
577
578 /*
579 * Get next entry. Check if last.
580 */
581 if (!wbsd_next_sg(host))
582 {
583 /*
584 * We should never reach this point.
585 * It means that we're trying to
586 * transfer more blocks than can fit
587 * into the scatter list.
588 */
589 BUG_ON(1);
590
591 host->size = data->bytes_xfered;
592
593 return;
594 }
595
596 buffer = wbsd_kmap_sg(host);
597 }
598 }
599 }
600
601 wbsd_kunmap_sg(host);
602
603 /*
604 * This is a very dirty hack to solve a
605 * hardware problem. The chip doesn't trigger
606 * FIFO threshold interrupts properly.
607 */
608 if ((host->size - data->bytes_xfered) < 16)
609 tasklet_schedule(&host->fifo_tasklet);
610}
611
612static void wbsd_fill_fifo(struct wbsd_host* host)
613{
614 struct mmc_data* data = host->mrq->cmd->data;
615 char* buffer;
616 int i, fsr, fifo;
617
618 /*
619 * Check that we aren't being called after the
620 * entire buffer has been transferred.
621 */
622 if (data->bytes_xfered == host->size)
623 return;
624
625 buffer = wbsd_kmap_sg(host) + host->offset;
626
627 /*
628 * Fill the fifo. This has a tendency to loop longer
629 * than the FIFO length (usually one block).
630 */
631 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
632 {
633 /*
634 * The size field in the FSR is broken so we have to
635 * do some guessing.
636 */
637 if (fsr & WBSD_FIFO_EMPTY)
638 fifo = 0;
639 else if (fsr & WBSD_FIFO_EMTHRE)
640 fifo = 8;
641 else
642 fifo = 15;
643
644 for (i = 16;i > fifo;i--)
645 {
646 outb(*buffer, host->base + WBSD_DFR);
647 buffer++;
648 host->offset++;
649 host->remain--;
650
651 data->bytes_xfered++;
652
653 /*
654 * Transfer done?
655 */
656 if (data->bytes_xfered == host->size)
657 {
658 wbsd_kunmap_sg(host);
659 return;
660 }
661
662 /*
663 * End of scatter list entry?
664 */
665 if (host->remain == 0)
666 {
667 wbsd_kunmap_sg(host);
668
669 /*
670 * Get next entry. Check if last.
671 */
672 if (!wbsd_next_sg(host))
673 {
674 /*
675 * We should never reach this point.
676 * It means that we're trying to
677 * transfer more blocks than can fit
678 * into the scatter list.
679 */
680 BUG_ON(1);
681
682 host->size = data->bytes_xfered;
683
684 return;
685 }
686
687 buffer = wbsd_kmap_sg(host);
688 }
689 }
690 }
691
692 wbsd_kunmap_sg(host);
693
694 /*
695 * The controller stops sending interrupts for
696 * 'FIFO empty' under certain conditions. So we
697 * need to be a bit more pro-active.
698 */
699 tasklet_schedule(&host->fifo_tasklet);
700}
701
702static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
703{
704 u16 blksize;
705 u8 setup;
706 unsigned long dmaflags;
707
708 DBGF("blksz %04x blks %04x flags %08x\n",
709 1 << data->blksz_bits, data->blocks, data->flags);
710 DBGF("tsac %d ms nsac %d clk\n",
711 data->timeout_ns / 1000000, data->timeout_clks);
712
713 /*
714 * Calculate size.
715 */
716 host->size = data->blocks << data->blksz_bits;
717
718 /*
719 * Check timeout values for overflow.
720 * (Yes, some cards cause this value to overflow).
721 */
722 if (data->timeout_ns > 127000000)
723 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
724 else
725 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
726
727 if (data->timeout_clks > 255)
728 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
729 else
730 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
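/*
 * Worked example (added): a card requesting a 250 ms data timeout yields
 * timeout_ns = 250000000, above the 127000000 ns limit, so the TAAC index
 * register is clamped to its maximum of 127 (the value is programmed in
 * milliseconds, timeout_ns / 1000000). NSAC is likewise clamped to the
 * 8-bit maximum of 255 clocks.
 */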
731
732 /*
733 * Inform the chip of how large blocks will be
734 * sent. It needs this to determine when to
735 * calculate CRC.
736 *
737 * Space for CRC must be included in the size.
738 */
739 blksize = (1 << data->blksz_bits) + 2;
740
741 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
742 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
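/*
 * Worked example (added): with 512-byte blocks, blksize = 512 + 2 = 514
 * (0x202), the extra two bytes presumably covering the 16-bit data CRC.
 * (0x202 >> 4) & 0xF0 puts 0x20 into PBSMSB and 0x202 & 0xFF puts 0x02
 * into PBSLSB.
 */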
743
744 /*
745 * Clear the FIFO. This is needed even for DMA
746 * transfers since the chip still uses the FIFO
747 * internally.
748 */
749 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
750 setup |= WBSD_FIFO_RESET;
751 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
752
753 /*
754 * DMA transfer?
755 */
756 if (host->dma >= 0)
757 {
758 /*
759 * The buffer for DMA is only 64 kB.
760 */
761 BUG_ON(host->size > 0x10000);
762 if (host->size > 0x10000)
763 {
764 data->error = MMC_ERR_INVALID;
765 return;
766 }
767
768 /*
769 * Transfer data from the SG list to
770 * the DMA buffer.
771 */
772 if (data->flags & MMC_DATA_WRITE)
773 wbsd_sg_to_dma(host, data);
774
775 /*
776 * Initialise the ISA DMA controller.
777 */
778 dmaflags = claim_dma_lock();
779 disable_dma(host->dma);
780 clear_dma_ff(host->dma);
781 if (data->flags & MMC_DATA_READ)
782 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
783 else
784 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
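/*
 * Added note (best guess): DMA_MODE_READ/DMA_MODE_WRITE select "single
 * transfer" mode on the 8237 via bit 0x40; masking that bit off requests
 * demand-mode transfers so the controller can pace the FIFO itself.
 */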
785 set_dma_addr(host->dma, host->dma_addr);
786 set_dma_count(host->dma, host->size);
787
788 enable_dma(host->dma);
789 release_dma_lock(dmaflags);
790
791 /*
792 * Enable DMA on the host.
793 */
794 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
795 }
796 else
797 {
798 /*
799 * This flag is used to keep printk
800 * output to a minimum.
801 */
802 host->firsterr = 1;
803
804 /*
805 * Initialise the SG list.
806 */
807 wbsd_init_sg(host, data);
808
809 /*
810 * Turn off DMA.
811 */
812 wbsd_write_index(host, WBSD_IDX_DMA, 0);
813
814 /*
815 * Set up FIFO threshold levels (and fill
816 * buffer if doing a write).
817 */
818 if (data->flags & MMC_DATA_READ)
819 {
820 wbsd_write_index(host, WBSD_IDX_FIFOEN,
821 WBSD_FIFOEN_FULL | 8);
822 }
823 else
824 {
825 wbsd_write_index(host, WBSD_IDX_FIFOEN,
826 WBSD_FIFOEN_EMPTY | 8);
827 wbsd_fill_fifo(host);
828 }
829 }
830
831 data->error = MMC_ERR_NONE;
832}
833
834static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
835{
836 unsigned long dmaflags;
837 int count;
838 u8 status;
839
840 WARN_ON(host->mrq == NULL);
841
842 /*
843 * Send a stop command if needed.
844 */
845 if (data->stop)
846 wbsd_send_command(host, data->stop);
847
848 /*
849 * Wait for the controller to leave data
850 * transfer state.
851 */
852 do
853 {
854 status = wbsd_read_index(host, WBSD_IDX_STATUS);
855 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
856
857 /*
858 * DMA transfer?
859 */
860 if (host->dma >= 0)
861 {
862 /*
863 * Disable DMA on the host.
864 */
865 wbsd_write_index(host, WBSD_IDX_DMA, 0);
866
867 /*
868 * Turn off the ISA DMA controller.
869 */
870 dmaflags = claim_dma_lock();
871 disable_dma(host->dma);
872 clear_dma_ff(host->dma);
873 count = get_dma_residue(host->dma);
874 release_dma_lock(dmaflags);
875
876 /*
877 * Any leftover data?
878 */
879 if (count)
880 {
881 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
882 "transfer. %d bytes left.\n", count);
883
884 data->error = MMC_ERR_FAILED;
885 }
886 else
887 {
888 /*
889 * Transfer data from DMA buffer to
890 * SG list.
891 */
892 if (data->flags & MMC_DATA_READ)
893 wbsd_dma_to_sg(host, data);
894
895 data->bytes_xfered = host->size;
896 }
897 }
898
899 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
900
901 wbsd_request_end(host, host->mrq);
902}
903
904/*****************************************************************************\
905 * *
906 * MMC layer callbacks *
907 * *
908\*****************************************************************************/
909
910static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
911{
912 struct wbsd_host* host = mmc_priv(mmc);
913 struct mmc_command* cmd;
914
915 /*
916 * Disable tasklets to avoid a deadlock.
917 */
918 spin_lock_bh(&host->lock);
919
920 BUG_ON(host->mrq != NULL);
921
922 cmd = mrq->cmd;
923
924 host->mrq = mrq;
925
926 /*
927 * If there is no card in the slot then
928 * timeout immediately.
929 */
930 if (!(host->flags & WBSD_FCARD_PRESENT))
931 {
932 cmd->error = MMC_ERR_TIMEOUT;
933 goto done;
934 }
935
936 /*
937 * Does the request include data?
938 */
939 if (cmd->data)
940 {
941 wbsd_prepare_data(host, cmd->data);
942
943 if (cmd->data->error != MMC_ERR_NONE)
944 goto done;
945 }
946
947 wbsd_send_command(host, cmd);
948
949 /*
950 * If this is a data transfer the request
951 * will be finished after the data has
952 * been transferred.
953 */
954 if (cmd->data && (cmd->error == MMC_ERR_NONE))
955 {
956 /*
957 * Dirty fix for hardware bug.
958 */
959 if (host->dma == -1)
960 tasklet_schedule(&host->fifo_tasklet);
961
962 spin_unlock_bh(&host->lock);
963
964 return;
965 }
966
967done:
968 wbsd_request_end(host, mrq);
969
970 spin_unlock_bh(&host->lock);
971}
972
973static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
974{
975 struct wbsd_host* host = mmc_priv(mmc);
976 u8 clk, setup, pwr;
977
978 DBGF("clock %uHz busmode %u powermode %u Vdd %u\n",
979 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
980
981 spin_lock_bh(&host->lock);
982
983 /*
984 * Reset the chip on each power off.
985 * Should clear out any weird states.
986 */
987 if (ios->power_mode == MMC_POWER_OFF)
988 wbsd_init_device(host);
989
990 if (ios->clock >= 24000000)
991 clk = WBSD_CLK_24M;
992 else if (ios->clock >= 16000000)
993 clk = WBSD_CLK_16M;
994 else if (ios->clock >= 12000000)
995 clk = WBSD_CLK_12M;
996 else
997 clk = WBSD_CLK_375K;
998
999 /*
1000 * Only write to the clock register when
1001 * there is an actual change.
1002 */
1003 if (clk != host->clk)
1004 {
1005 wbsd_write_index(host, WBSD_IDX_CLK, clk);
1006 host->clk = clk;
1007 }
1008
1009 /*
1010 * Power up card.
1011 */
1012 if (ios->power_mode != MMC_POWER_OFF)
1013 {
1014 pwr = inb(host->base + WBSD_CSR);
1015 pwr &= ~WBSD_POWER_N;
1016 outb(pwr, host->base + WBSD_CSR);
1017 }
1018
1019 /*
1020 * MMC cards need to have pin 1 high during init.
1021 * Init time corresponds rather nicely with the bus mode.
1022 * It wreaks havoc with the card detection though so
1023 * that needs to be disabled.
1024 */
1025 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1026 if ((ios->power_mode == MMC_POWER_ON) &&
1027 (ios->bus_mode == MMC_BUSMODE_OPENDRAIN))
1028 {
1029 setup |= WBSD_DAT3_H;
1030 host->flags |= WBSD_FIGNORE_DETECT;
1031 }
1032 else
1033 {
1034 setup &= ~WBSD_DAT3_H;
1035 host->flags &= ~WBSD_FIGNORE_DETECT;
1036 }
1037 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1038
1039 spin_unlock_bh(&host->lock);
1040}
1041
1042static struct mmc_host_ops wbsd_ops = {
1043 .request = wbsd_request,
1044 .set_ios = wbsd_set_ios,
1045};
1046
1047/*****************************************************************************\
1048 * *
1049 * Interrupt handling *
1050 * *
1051\*****************************************************************************/
1052
1053/*
1054 * Helper function for card detection
1055 */
1056static void wbsd_detect_card(unsigned long data)
1057{
1058 struct wbsd_host *host = (struct wbsd_host*)data;
1059
1060 BUG_ON(host == NULL);
1061
1062 DBG("Executing card detection\n");
1063
1064 mmc_detect_change(host->mmc);
1065}
1066
1067/*
1068 * Tasklets
1069 */
1070
1071inline static struct mmc_data* wbsd_get_data(struct wbsd_host* host)
1072{
1073 WARN_ON(!host->mrq);
1074 if (!host->mrq)
1075 return NULL;
1076
1077 WARN_ON(!host->mrq->cmd);
1078 if (!host->mrq->cmd)
1079 return NULL;
1080
1081 WARN_ON(!host->mrq->cmd->data);
1082 if (!host->mrq->cmd->data)
1083 return NULL;
1084
1085 return host->mrq->cmd->data;
1086}
1087
1088static void wbsd_tasklet_card(unsigned long param)
1089{
1090 struct wbsd_host* host = (struct wbsd_host*)param;
1091 u8 csr;
1092
1093 spin_lock(&host->lock);
1094
1095 if (host->flags & WBSD_FIGNORE_DETECT)
1096 {
1097 spin_unlock(&host->lock);
1098 return;
1099 }
1100
1101 csr = inb(host->base + WBSD_CSR);
1102 WARN_ON(csr == 0xff);
1103
1104 if (csr & WBSD_CARDPRESENT)
1105 {
1106 if (!(host->flags & WBSD_FCARD_PRESENT))
1107 {
1108 DBG("Card inserted\n");
1109 host->flags |= WBSD_FCARD_PRESENT;
1110
1111 /*
1112 * Delay card detection to allow electrical connections
1113 * to stabilise.
1114 */
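/* Added note: jiffies + HZ/2 arms the detection timer roughly half a second out. */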
1115 mod_timer(&host->timer, jiffies + HZ/2);
1116 }
1117
1118 spin_unlock(&host->lock);
1119 }
1120 else if (host->flags & WBSD_FCARD_PRESENT)
1121 {
1122 DBG("Card removed\n");
1123 host->flags &= ~WBSD_FCARD_PRESENT;
1124
1125 if (host->mrq)
1126 {
1127 printk(KERN_ERR DRIVER_NAME
1128 ": Card removed during transfer!\n");
1129 wbsd_reset(host);
1130
1131 host->mrq->cmd->error = MMC_ERR_FAILED;
1132 tasklet_schedule(&host->finish_tasklet);
1133 }
1134
1135 /*
1136 * Unlock first since we might get a call back.
1137 */
1138 spin_unlock(&host->lock);
1139
1140 mmc_detect_change(host->mmc);
1141 }
1142}
1143
1144static void wbsd_tasklet_fifo(unsigned long param)
1145{
1146 struct wbsd_host* host = (struct wbsd_host*)param;
1147 struct mmc_data* data;
1148
1149 spin_lock(&host->lock);
1150
1151 if (!host->mrq)
1152 goto end;
1153
1154 data = wbsd_get_data(host);
1155 if (!data)
1156 goto end;
1157
1158 if (data->flags & MMC_DATA_WRITE)
1159 wbsd_fill_fifo(host);
1160 else
1161 wbsd_empty_fifo(host);
1162
1163 /*
1164 * Done?
1165 */
1166 if (host->size == data->bytes_xfered)
1167 {
1168 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1169 tasklet_schedule(&host->finish_tasklet);
1170 }
1171
1172end:
1173 spin_unlock(&host->lock);
1174}
1175
1176static void wbsd_tasklet_crc(unsigned long param)
1177{
1178 struct wbsd_host* host = (struct wbsd_host*)param;
1179 struct mmc_data* data;
1180
1181 spin_lock(&host->lock);
1182
1183 if (!host->mrq)
1184 goto end;
1185
1186 data = wbsd_get_data(host);
1187 if (!data)
1188 goto end;
1189
1190 DBGF("CRC error\n");
1191
1192 data->error = MMC_ERR_BADCRC;
1193
1194 tasklet_schedule(&host->finish_tasklet);
1195
1196end:
1197 spin_unlock(&host->lock);
1198}
1199
1200static void wbsd_tasklet_timeout(unsigned long param)
1201{
1202 struct wbsd_host* host = (struct wbsd_host*)param;
1203 struct mmc_data* data;
1204
1205 spin_lock(&host->lock);
1206
1207 if (!host->mrq)
1208 goto end;
1209
1210 data = wbsd_get_data(host);
1211 if (!data)
1212 goto end;
1213
1214 DBGF("Timeout\n");
1215
1216 data->error = MMC_ERR_TIMEOUT;
1217
1218 tasklet_schedule(&host->finish_tasklet);
1219
1220end:
1221 spin_unlock(&host->lock);
1222}
1223
1224static void wbsd_tasklet_finish(unsigned long param)
1225{
1226 struct wbsd_host* host = (struct wbsd_host*)param;
1227 struct mmc_data* data;
1228
1229 spin_lock(&host->lock);
1230
1231 WARN_ON(!host->mrq);
1232 if (!host->mrq)
1233 goto end;
1234
1235 data = wbsd_get_data(host);
1236 if (!data)
1237 goto end;
1238
1239 wbsd_finish_data(host, data);
1240
1241end:
1242 spin_unlock(&host->lock);
1243}
1244
1245static void wbsd_tasklet_block(unsigned long param)
1246{
1247 struct wbsd_host* host = (struct wbsd_host*)param;
1248 struct mmc_data* data;
1249
1250 spin_lock(&host->lock);
1251
1252 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1253 WBSD_CRC_OK)
1254 {
1255 data = wbsd_get_data(host);
1256 if (!data)
1257 goto end;
1258
1259 DBGF("CRC error\n");
1260
1261 data->error = MMC_ERR_BADCRC;
1262
1263 tasklet_schedule(&host->finish_tasklet);
1264 }
1265
1266end:
1267 spin_unlock(&host->lock);
1268}
1269
1270/*
1271 * Interrupt handling
1272 */
1273
1274static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1275{
1276 struct wbsd_host* host = dev_id;
1277 int isr;
1278
1279 isr = inb(host->base + WBSD_ISR);
1280
1281 /*
1282 * Was it actually our hardware that caused the interrupt?
1283 */
1284 if (isr == 0xff || isr == 0x00)
1285 return IRQ_NONE;
1286
1287 host->isr |= isr;
1288
1289 /*
1290 * Schedule tasklets as needed.
1291 */
1292 if (isr & WBSD_INT_CARD)
1293 tasklet_schedule(&host->card_tasklet);
1294 if (isr & WBSD_INT_FIFO_THRE)
1295 tasklet_schedule(&host->fifo_tasklet);
1296 if (isr & WBSD_INT_CRC)
1297 tasklet_hi_schedule(&host->crc_tasklet);
1298 if (isr & WBSD_INT_TIMEOUT)
1299 tasklet_hi_schedule(&host->timeout_tasklet);
1300 if (isr & WBSD_INT_BUSYEND)
1301 tasklet_hi_schedule(&host->block_tasklet);
1302 if (isr & WBSD_INT_TC)
1303 tasklet_schedule(&host->finish_tasklet);
1304
1305 return IRQ_HANDLED;
1306}
1307
1308/*****************************************************************************\
1309 * *
1310 * Device initialisation and shutdown *
1311 * *
1312\*****************************************************************************/
1313
1314/*
1315 * Allocate/free MMC structure.
1316 */
1317
1318static int __devinit wbsd_alloc_mmc(struct device* dev)
1319{
1320 struct mmc_host* mmc;
1321 struct wbsd_host* host;
1322
1323 /*
1324 * Allocate MMC structure.
1325 */
1326 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1327 if (!mmc)
1328 return -ENOMEM;
1329
1330 host = mmc_priv(mmc);
1331 host->mmc = mmc;
1332
1333 host->dma = -1;
1334
1335 /*
1336 * Set host parameters.
1337 */
1338 mmc->ops = &wbsd_ops;
1339 mmc->f_min = 375000;
1340 mmc->f_max = 24000000;
1341 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1342
1343 spin_lock_init(&host->lock);
1344
1345 /*
1346 * Set up detection timer
1347 */
1348 init_timer(&host->timer);
1349 host->timer.data = (unsigned long)host;
1350 host->timer.function = wbsd_detect_card;
1351
1352 /*
1353 * Maximum number of segments. Worst case is one sector per segment
1354 * so this will be 64kB/512.
1355 */
1356 mmc->max_hw_segs = 128;
1357 mmc->max_phys_segs = 128;
1358
1359 /*
1360 * Maximum number of sectors in one transfer. Also limited by 64kB
1361 * buffer.
1362 */
1363 mmc->max_sectors = 128;
1364
1365 /*
1366 * Maximum segment size. Could be one segment with the maximum number
1367 * of segments.
1368 */
1369 mmc->max_seg_size = mmc->max_sectors * 512;
1370
1371 dev_set_drvdata(dev, mmc);
1372
1373 return 0;
1374}
1375
1376static void __devexit wbsd_free_mmc(struct device* dev)
1377{
1378 struct mmc_host* mmc;
1379 struct wbsd_host* host;
1380
1381 mmc = dev_get_drvdata(dev);
1382 if (!mmc)
1383 return;
1384
1385 host = mmc_priv(mmc);
1386 BUG_ON(host == NULL);
1387
1388 del_timer_sync(&host->timer);
1389
1390 mmc_free_host(mmc);
1391
1392 dev_set_drvdata(dev, NULL);
1393}
1394
1395/*
1396 * Scan for known chip id:s
1397 */
1398
1399static int __devinit wbsd_scan(struct wbsd_host* host)
1400{
1401 int i, j, k;
1402 int id;
1403
1404 /*
1405 * Iterate through all ports, all codes to
1406 * find hardware that is in our known list.
1407 */
1408 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
1409 {
1410 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1411 continue;
1412
1413 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
1414 {
1415 id = 0xFFFF;
1416
1417 outb(unlock_codes[j], config_ports[i]);
1418 outb(unlock_codes[j], config_ports[i]);
1419
1420 outb(WBSD_CONF_ID_HI, config_ports[i]);
1421 id = inb(config_ports[i] + 1) << 8;
1422
1423 outb(WBSD_CONF_ID_LO, config_ports[i]);
1424 id |= inb(config_ports[i] + 1);
1425
1426 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
1427 {
1428 if (id == valid_ids[k])
1429 {
1430 host->chip_id = id;
1431 host->config = config_ports[i];
1432 host->unlock_code = unlock_codes[j];
1433
1434 return 0;
1435 }
1436 }
1437
1438 if (id != 0xFFFF)
1439 {
1440 DBG("Unknown hardware (id %x) found at %x\n",
1441 id, config_ports[i]);
1442 }
1443
1444 outb(LOCK_CODE, config_ports[i]);
1445 }
1446
1447 release_region(config_ports[i], 2);
1448 }
1449
1450 return -ENODEV;
1451}
1452
1453/*
1454 * Allocate/free io port ranges
1455 */
1456
1457static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
1458{
1459 if (base & 0x7)
1460 return -EINVAL;
1461
1462 if (!request_region(base, 8, DRIVER_NAME))
1463 return -EIO;
1464
1465 host->base = base;
1466
1467 return 0;
1468}
1469
1470static void __devexit wbsd_release_regions(struct wbsd_host* host)
1471{
1472 if (host->base)
1473 release_region(host->base, 8);
1474
1475 host->base = 0;
1476
1477 if (host->config)
1478 release_region(host->config, 2);
1479
1480 host->config = 0;
1481}
1482
1483/*
1484 * Allocate/free DMA port and buffer
1485 */
1486
1487static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma)
1488{
1489 if (dma < 0)
1490 return;
1491
1492 if (request_dma(dma, DRIVER_NAME))
1493 goto err;
1494
1495 /*
1496 * We need to allocate a special buffer in
1497 * order for ISA to be able to DMA to it.
1498 */
1499 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1500 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1501 if (!host->dma_buffer)
1502 goto free;
1503
1504 /*
1505 * Translate the address to a physical address.
1506 */
1507 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1508 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1509
1510 /*
1511 * ISA DMA must be aligned on a 64k basis.
1512 */
1513 if ((host->dma_addr & 0xffff) != 0)
1514 goto kfree;
1515 /*
1516 * ISA cannot access memory above 16 MB.
1517 */
1518 else if (host->dma_addr >= 0x1000000)
1519 goto kfree;
1520
1521 host->dma = dma;
1522
1523 return;
1524
1525kfree:
1526 /*
1527 * If we've gotten here then there is some kind of alignment bug
1528 */
1529 BUG_ON(1);
1530
1531 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1532 DMA_BIDIRECTIONAL);
1533 host->dma_addr = (dma_addr_t)NULL;
1534
1535 kfree(host->dma_buffer);
1536 host->dma_buffer = NULL;
1537
1538free:
1539 free_dma(dma);
1540
1541err:
1542 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1543 "Falling back on FIFO.\n", dma);
1544}
1545
1546static void __devexit wbsd_release_dma(struct wbsd_host* host)
1547{
1548 if (host->dma_addr)
1549 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1550 DMA_BIDIRECTIONAL);
1551 if (host->dma_buffer)
1552 kfree(host->dma_buffer);
1553 if (host->dma >= 0)
1554 free_dma(host->dma);
1555
1556 host->dma = -1;
1557 host->dma_buffer = NULL;
1558 host->dma_addr = (dma_addr_t)NULL;
1559}
1560
1561/*
1562 * Allocate/free IRQ.
1563 */
1564
1565static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
1566{
1567 int ret;
1568
1569 /*
1570 * Allocate interrupt.
1571 */
1572
1573 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1574 if (ret)
1575 return ret;
1576
1577 host->irq = irq;
1578
1579 /*
1580 * Set up tasklets.
1581 */
1582 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
1583 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
1584 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
1585 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
1586 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
1587 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
1588
1589 return 0;
1590}
1591
1592static void __devexit wbsd_release_irq(struct wbsd_host* host)
1593{
1594 if (!host->irq)
1595 return;
1596
1597 free_irq(host->irq, host);
1598
1599 host->irq = 0;
1600
1601 tasklet_kill(&host->card_tasklet);
1602 tasklet_kill(&host->fifo_tasklet);
1603 tasklet_kill(&host->crc_tasklet);
1604 tasklet_kill(&host->timeout_tasklet);
1605 tasklet_kill(&host->finish_tasklet);
1606 tasklet_kill(&host->block_tasklet);
1607}
1608
1609/*
1610 * Allocate all resources for the host.
1611 */
1612
1613static int __devinit wbsd_request_resources(struct wbsd_host* host,
1614 int base, int irq, int dma)
1615{
1616 int ret;
1617
1618 /*
1619 * Allocate I/O ports.
1620 */
1621 ret = wbsd_request_region(host, base);
1622 if (ret)
1623 return ret;
1624
1625 /*
1626 * Allocate interrupt.
1627 */
1628 ret = wbsd_request_irq(host, irq);
1629 if (ret)
1630 return ret;
1631
1632 /*
1633 * Allocate DMA.
1634 */
1635 wbsd_request_dma(host, dma);
1636
1637 return 0;
1638}
1639
1640/*
1641 * Release all resources for the host.
1642 */
1643
1644static void __devexit wbsd_release_resources(struct wbsd_host* host)
1645{
1646 wbsd_release_dma(host);
1647 wbsd_release_irq(host);
1648 wbsd_release_regions(host);
1649}
1650
1651/*
1652 * Configure the resources the chip should use.
1653 */
1654
1655static void __devinit wbsd_chip_config(struct wbsd_host* host)
1656{
1657 /*
1658 * Reset the chip.
1659 */
1660 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1661 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1662
1663 /*
1664 * Select SD/MMC function.
1665 */
1666 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1667
1668 /*
1669 * Set up card detection.
1670 */
1671 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1672
1673 /*
1674 * Configure chip
1675 */
1676 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1677 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1678
1679 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1680
1681 if (host->dma >= 0)
1682 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1683
1684 /*
1685 * Enable and power up chip.
1686 */
1687 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1688 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1689}
1690
1691/*
1692 * Check that configured resources are correct.
1693 */
1694
1695static int __devinit wbsd_chip_validate(struct wbsd_host* host)
1696{
1697 int base, irq, dma;
1698
1699 /*
1700 * Select SD/MMC function.
1701 */
1702 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1703
1704 /*
1705 * Read configuration.
1706 */
1707 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1708 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1709
1710 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1711
1712 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1713
1714 /*
1715 * Validate against given configuration.
1716 */
1717 if (base != host->base)
1718 return 0;
1719 if (irq != host->irq)
1720 return 0;
1721 if ((dma != host->dma) && (host->dma != -1))
1722 return 0;
1723
1724 return 1;
1725}
1726
1727/*****************************************************************************\
1728 * *
1729 * Devices setup and shutdown *
1730 * *
1731\*****************************************************************************/
1732
1733static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
1734 int pnp)
1735{
1736 struct wbsd_host* host = NULL;
1737 struct mmc_host* mmc = NULL;
1738 int ret;
1739
1740 ret = wbsd_alloc_mmc(dev);
1741 if (ret)
1742 return ret;
1743
1744 mmc = dev_get_drvdata(dev);
1745 host = mmc_priv(mmc);
1746
1747 /*
1748 * Scan for hardware.
1749 */
1750 ret = wbsd_scan(host);
1751 if (ret)
1752 {
1753 if (pnp && (ret == -ENODEV))
1754 {
1755 printk(KERN_WARNING DRIVER_NAME
1756 ": Unable to confirm device presence. You may "
1757 "experience lock-ups.\n");
1758 }
1759 else
1760 {
1761 wbsd_free_mmc(dev);
1762 return ret;
1763 }
1764 }
1765
1766 /*
1767 * Request resources.
1768 */
1769 ret = wbsd_request_resources(host, base, irq, dma);
1770 if (ret)
1771 {
1772 wbsd_release_resources(host);
1773 wbsd_free_mmc(dev);
1774 return ret;
1775 }
1776
1777 /*
1778 * See if chip needs to be configured.
1779 */
1780 if (pnp && (host->config != 0))
1781 {
1782 if (!wbsd_chip_validate(host))
1783 {
1784 printk(KERN_WARNING DRIVER_NAME
1785 ": PnP active but chip not configured! "
1786 "You probably have a buggy BIOS. "
1787 "Configuring chip manually.\n");
1788 wbsd_chip_config(host);
1789 }
1790 }
1791 else
1792 wbsd_chip_config(host);
1793
1794 /*
1795 * Power Management stuff. No idea how this works.
1796 * Not tested.
1797 */
1798#ifdef CONFIG_PM
1799 if (host->config)
1800 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1801#endif
1802 /*
1803 * Allow device to initialise itself properly.
1804 */
1805 mdelay(5);
1806
1807 /*
1808 * Reset the chip into a known state.
1809 */
1810 wbsd_init_device(host);
1811
1812 mmc_add_host(mmc);
1813
1814 printk(KERN_INFO "%s: W83L51xD", mmc->host_name);
1815 if (host->chip_id != 0)
1816 printk(" id %x", (int)host->chip_id);
1817 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1818 if (host->dma >= 0)
1819 printk(" dma %d", (int)host->dma);
1820 else
1821 printk(" FIFO");
1822 if (pnp)
1823 printk(" PnP");
1824 printk("\n");
1825
1826 return 0;
1827}
1828
1829static void __devexit wbsd_shutdown(struct device* dev, int pnp)
1830{
1831 struct mmc_host* mmc = dev_get_drvdata(dev);
1832 struct wbsd_host* host;
1833
1834 if (!mmc)
1835 return;
1836
1837 host = mmc_priv(mmc);
1838
1839 mmc_remove_host(mmc);
1840
1841 if (!pnp)
1842 {
1843 /*
1844 * Power down the SD/MMC function.
1845 */
1846 wbsd_unlock_config(host);
1847 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1848 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1849 wbsd_lock_config(host);
1850 }
1851
1852 wbsd_release_resources(host);
1853
1854 wbsd_free_mmc(dev);
1855}
1856
1857/*
1858 * Non-PnP
1859 */
1860
1861static int __devinit wbsd_probe(struct device* dev)
1862{
1863 return wbsd_init(dev, io, irq, dma, 0);
1864}
1865
1866static int __devexit wbsd_remove(struct device* dev)
1867{
1868 wbsd_shutdown(dev, 0);
1869
1870 return 0;
1871}
1872
1873/*
1874 * PnP
1875 */
1876
1877#ifdef CONFIG_PNP
1878
1879static int __devinit
1880wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
1881{
1882 int io, irq, dma;
1883
1884 /*
1885 * Get resources from PnP layer.
1886 */
1887 io = pnp_port_start(pnpdev, 0);
1888 irq = pnp_irq(pnpdev, 0);
1889 if (pnp_dma_valid(pnpdev, 0))
1890 dma = pnp_dma(pnpdev, 0);
1891 else
1892 dma = -1;
1893
1894 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1895
1896 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1897}
1898
1899static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
1900{
1901 wbsd_shutdown(&dev->dev, 1);
1902}
1903
1904#endif /* CONFIG_PNP */
1905
1906/*
1907 * Power management
1908 */
1909
1910#ifdef CONFIG_PM
1911static int wbsd_suspend(struct device *dev, pm_message_t state, u32 level)
1912{
1913 DBGF("Not yet supported\n");
1914
1915 return 0;
1916}
1917
1918static int wbsd_resume(struct device *dev, u32 level)
1919{
1920 DBGF("Not yet supported\n");
1921
1922 return 0;
1923}
1924#else
1925#define wbsd_suspend NULL
1926#define wbsd_resume NULL
1927#endif
1928
1929static struct platform_device *wbsd_device;
1930
1931static struct device_driver wbsd_driver = {
1932 .name = DRIVER_NAME,
1933 .bus = &platform_bus_type,
1934 .probe = wbsd_probe,
1935 .remove = wbsd_remove,
1936
1937 .suspend = wbsd_suspend,
1938 .resume = wbsd_resume,
1939};
1940
1941#ifdef CONFIG_PNP
1942
1943static struct pnp_driver wbsd_pnp_driver = {
1944 .name = DRIVER_NAME,
1945 .id_table = pnp_dev_table,
1946 .probe = wbsd_pnp_probe,
1947 .remove = wbsd_pnp_remove,
1948};
1949
1950#endif /* CONFIG_PNP */
1951
1952/*
1953 * Module loading/unloading
1954 */
1955
1956static int __init wbsd_drv_init(void)
1957{
1958 int result;
1959
1960 printk(KERN_INFO DRIVER_NAME
1961 ": Winbond W83L51xD SD/MMC card interface driver, "
1962 DRIVER_VERSION "\n");
1963 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1964
1965#ifdef CONFIG_PNP
1966
1967 if (!nopnp)
1968 {
1969 result = pnp_register_driver(&wbsd_pnp_driver);
1970 if (result < 0)
1971 return result;
1972 }
1973
1974#endif /* CONFIG_PNP */
1975
1976 if (nopnp)
1977 {
1978 result = driver_register(&wbsd_driver);
1979 if (result < 0)
1980 return result;
1981
1982 wbsd_device = platform_device_register_simple(DRIVER_NAME, -1,
1983 NULL, 0);
1984 if (IS_ERR(wbsd_device))
1985 return PTR_ERR(wbsd_device);
1986 }
1987
1988 return 0;
1989}
1990
1991static void __exit wbsd_drv_exit(void)
1992{
1993#ifdef CONFIG_PNP
1994
1995 if (!nopnp)
1996 pnp_unregister_driver(&wbsd_pnp_driver);
1997
1998#endif /* CONFIG_PNP */
1999
2000 if (nopnp)
2001 {
2002 platform_device_unregister(wbsd_device);
2003
2004 driver_unregister(&wbsd_driver);
2005 }
2006
2007 DBG("unloaded\n");
2008}
2009
2010module_init(wbsd_drv_init);
2011module_exit(wbsd_drv_exit);
2012#ifdef CONFIG_PNP
2013module_param(nopnp, uint, 0444);
2014#endif
2015module_param(io, uint, 0444);
2016module_param(irq, uint, 0444);
2017module_param(dma, int, 0444);
2018
2019MODULE_LICENSE("GPL");
2020MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2021MODULE_VERSION(DRIVER_VERSION);
2022
2023#ifdef CONFIG_PNP
2024MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2025#endif
2026MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2027MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2028MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");