sdhci: fix bad warning from commit c8b3e02
drivers/mmc/host/sdhci.c (deliverable/linux.git)
1 /*
2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * Thanks to the following companies for their support:
12 *
13 * - JMicron (hardware and technical support)
14 */
15
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
18 #include <linux/io.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/scatterlist.h>
21
22 #include <linux/leds.h>
23
24 #include <linux/mmc/host.h>
25
26 #include "sdhci.h"
27
28 #define DRIVER_NAME "sdhci"
29
30 #define DBG(f, x...) \
31 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
32
33 static unsigned int debug_quirks = 0;
34
35 static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
36 static void sdhci_finish_data(struct sdhci_host *);
37
38 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
39 static void sdhci_finish_command(struct sdhci_host *);
40
41 static void sdhci_dumpregs(struct sdhci_host *host)
42 {
43 printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
44
45 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
46 readl(host->ioaddr + SDHCI_DMA_ADDRESS),
47 readw(host->ioaddr + SDHCI_HOST_VERSION));
48 printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
49 readw(host->ioaddr + SDHCI_BLOCK_SIZE),
50 readw(host->ioaddr + SDHCI_BLOCK_COUNT));
51 printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
52 readl(host->ioaddr + SDHCI_ARGUMENT),
53 readw(host->ioaddr + SDHCI_TRANSFER_MODE));
54 printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
55 readl(host->ioaddr + SDHCI_PRESENT_STATE),
56 readb(host->ioaddr + SDHCI_HOST_CONTROL));
57 printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
58 readb(host->ioaddr + SDHCI_POWER_CONTROL),
59 readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
60 printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
61 readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
62 readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
63 printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
64 readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
65 readl(host->ioaddr + SDHCI_INT_STATUS));
66 printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
67 readl(host->ioaddr + SDHCI_INT_ENABLE),
68 readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
69 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
70 readw(host->ioaddr + SDHCI_ACMD12_ERR),
71 readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
72 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
73 readl(host->ioaddr + SDHCI_CAPABILITIES),
74 readl(host->ioaddr + SDHCI_MAX_CURRENT));
75
76 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
77 }
78
79 /*****************************************************************************\
80 * *
81 * Low level functions *
82 * *
83 \*****************************************************************************/
84
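/*
 * Request a software reset of the blocks selected by @mask and poll the
 * SOFTWARE_RESET register (for at most 100 ms) until the hardware clears
 * the bits again. A full reset also invalidates the cached clock setting.
 */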
85 static void sdhci_reset(struct sdhci_host *host, u8 mask)
86 {
87 unsigned long timeout;
88
89 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
90 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
91 SDHCI_CARD_PRESENT))
92 return;
93 }
94
95 writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
96
97 if (mask & SDHCI_RESET_ALL)
98 host->clock = 0;
99
100 /* Wait max 100 ms */
101 timeout = 100;
102
103 /* hw clears the bit when it's done */
104 while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
105 if (timeout == 0) {
106 printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
107 mmc_hostname(host->mmc), (int)mask);
108 sdhci_dumpregs(host);
109 return;
110 }
111 timeout--;
112 mdelay(1);
113 }
114 }
115
116 static void sdhci_init(struct sdhci_host *host)
117 {
118 u32 intmask;
119
120 sdhci_reset(host, SDHCI_RESET_ALL);
121
122 intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
123 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
124 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
125 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
126 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
127 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
128 SDHCI_INT_ADMA_ERROR;
129
130 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
131 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
132 }
133
134 static void sdhci_activate_led(struct sdhci_host *host)
135 {
136 u8 ctrl;
137
138 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
139 ctrl |= SDHCI_CTRL_LED;
140 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
141 }
142
143 static void sdhci_deactivate_led(struct sdhci_host *host)
144 {
145 u8 ctrl;
146
147 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
148 ctrl &= ~SDHCI_CTRL_LED;
149 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
150 }
151
152 #ifdef CONFIG_LEDS_CLASS
153 static void sdhci_led_control(struct led_classdev *led,
154 enum led_brightness brightness)
155 {
156 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
157 unsigned long flags;
158
159 spin_lock_irqsave(&host->lock, flags);
160
161 if (brightness == LED_OFF)
162 sdhci_deactivate_led(host);
163 else
164 sdhci_activate_led(host);
165
166 spin_unlock_irqrestore(&host->lock, flags);
167 }
168 #endif
169
170 /*****************************************************************************\
171 * *
172 * Core functions *
173 * *
174 \*****************************************************************************/
175
176 static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
177 {
178 return sg_virt(host->cur_sg);
179 }
180
181 static inline int sdhci_next_sg(struct sdhci_host* host)
182 {
183 /*
184 * Skip to next SG entry.
185 */
186 host->cur_sg++;
187 host->num_sg--;
188
189 /*
190 * Any entries left?
191 */
192 if (host->num_sg > 0) {
193 host->offset = 0;
194 host->remain = host->cur_sg->length;
195 }
196
197 return host->num_sg;
198 }
199
200 static void sdhci_read_block_pio(struct sdhci_host *host)
201 {
202 int blksize, chunk_remain;
203 u32 data;
204 char *buffer;
205 int size;
206
207 DBG("PIO reading\n");
208
209 blksize = host->data->blksz;
210 chunk_remain = 0;
211 data = 0;
212
213 buffer = sdhci_sg_to_buffer(host) + host->offset;
214
215 while (blksize) {
216 if (chunk_remain == 0) {
217 data = readl(host->ioaddr + SDHCI_BUFFER);
218 chunk_remain = min(blksize, 4);
219 }
220
221 size = min(host->remain, chunk_remain);
222
223 chunk_remain -= size;
224 blksize -= size;
225 host->offset += size;
226 host->remain -= size;
227
228 while (size) {
229 *buffer = data & 0xFF;
230 buffer++;
231 data >>= 8;
232 size--;
233 }
234
235 if (host->remain == 0) {
236 if (sdhci_next_sg(host) == 0) {
237 BUG_ON(blksize != 0);
238 return;
239 }
240 buffer = sdhci_sg_to_buffer(host);
241 }
242 }
243 }
244
245 static void sdhci_write_block_pio(struct sdhci_host *host)
246 {
247 int blksize, chunk_remain;
248 u32 data;
249 char *buffer;
250 int bytes, size;
251
252 DBG("PIO writing\n");
253
254 blksize = host->data->blksz;
255 chunk_remain = 4;
256 data = 0;
257
258 bytes = 0;
259 buffer = sdhci_sg_to_buffer(host) + host->offset;
260
261 while (blksize) {
262 size = min(host->remain, chunk_remain);
263
264 chunk_remain -= size;
265 blksize -= size;
266 host->offset += size;
267 host->remain -= size;
268
269 while (size) {
270 data >>= 8;
271 data |= (u32)*buffer << 24;
272 buffer++;
273 size--;
274 }
275
276 if (chunk_remain == 0) {
277 writel(data, host->ioaddr + SDHCI_BUFFER);
278 chunk_remain = min(blksize, 4);
279 }
280
281 if (host->remain == 0) {
282 if (sdhci_next_sg(host) == 0) {
283 BUG_ON(blksize != 0);
284 return;
285 }
286 buffer = sdhci_sg_to_buffer(host);
287 }
288 }
289 }
290
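/*
 * Move data between the scatterlist and the controller's buffer port,
 * one block at a time, for as long as the present-state register reports
 * buffer space/data available and scatterlist entries remain.
 */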
291 static void sdhci_transfer_pio(struct sdhci_host *host)
292 {
293 u32 mask;
294
295 BUG_ON(!host->data);
296
297 if (host->num_sg == 0)
298 return;
299
300 if (host->data->flags & MMC_DATA_READ)
301 mask = SDHCI_DATA_AVAILABLE;
302 else
303 mask = SDHCI_SPACE_AVAILABLE;
304
305 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
306 if (host->data->flags & MMC_DATA_READ)
307 sdhci_read_block_pio(host);
308 else
309 sdhci_write_block_pio(host);
310
311 if (host->num_sg == 0)
312 break;
313 }
314
315 DBG("PIO transfer complete.\n");
316 }
317
318 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
319 {
320 local_irq_save(*flags);
321 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
322 }
323
324 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
325 {
326 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
327 local_irq_restore(*flags);
328 }
329
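/*
 * Build the ADMA descriptor table for @data. Each descriptor is eight
 * bytes, filled in least-significant byte first. Scatterlist entries whose
 * DMA address is not 32-bit aligned have their leading (up to three) bytes
 * redirected through the pre-allocated align_buffer bounce area.
 */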
330 static int sdhci_adma_table_pre(struct sdhci_host *host,
331 struct mmc_data *data)
332 {
333 int direction;
334
335 u8 *desc;
336 u8 *align;
337 dma_addr_t addr;
338 dma_addr_t align_addr;
339 int len, offset;
340
341 struct scatterlist *sg;
342 int i;
343 char *buffer;
344 unsigned long flags;
345
346 /*
347 * The spec does not specify endianness of descriptor table.
348 * We currently guess that it is LE.
349 */
350
351 if (data->flags & MMC_DATA_READ)
352 direction = DMA_FROM_DEVICE;
353 else
354 direction = DMA_TO_DEVICE;
355
356 /*
357 * The ADMA descriptor table is mapped further down as we
358 * need to fill it with data first.
359 */
360
361 host->align_addr = dma_map_single(mmc_dev(host->mmc),
362 host->align_buffer, 128 * 4, direction);
363 if (dma_mapping_error(host->align_addr))
364 goto fail;
365 BUG_ON(host->align_addr & 0x3);
366
367 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
368 data->sg, data->sg_len, direction);
369 if (host->sg_count == 0)
370 goto unmap_align;
371
372 desc = host->adma_desc;
373 align = host->align_buffer;
374
375 align_addr = host->align_addr;
376
377 for_each_sg(data->sg, sg, host->sg_count, i) {
378 addr = sg_dma_address(sg);
379 len = sg_dma_len(sg);
380
381 /*
382 * The SDHCI specification states that ADMA
383 * addresses must be 32-bit aligned. If they
384 * aren't, then we use a bounce buffer for
385 * the (up to three) bytes that screw up the
386 * alignment.
387 */
388 offset = (4 - (addr & 0x3)) & 0x3;
389 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags);
392 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
393 memcpy(align, buffer, offset);
394 sdhci_kunmap_atomic(buffer, &flags);
395 }
396
397 desc[7] = (align_addr >> 24) & 0xff;
398 desc[6] = (align_addr >> 16) & 0xff;
399 desc[5] = (align_addr >> 8) & 0xff;
400 desc[4] = (align_addr >> 0) & 0xff;
401
402 BUG_ON(offset > 65536);
403
404 desc[3] = (offset >> 8) & 0xff;
405 desc[2] = (offset >> 0) & 0xff;
406
407 desc[1] = 0x00;
408 desc[0] = 0x21; /* tran, valid */
409
410 align += 4;
411 align_addr += 4;
412
413 desc += 8;
414
415 addr += offset;
416 len -= offset;
417 }
418
419 desc[7] = (addr >> 24) & 0xff;
420 desc[6] = (addr >> 16) & 0xff;
421 desc[5] = (addr >> 8) & 0xff;
422 desc[4] = (addr >> 0) & 0xff;
423
424 BUG_ON(len > 65536);
425
426 desc[3] = (len >> 8) & 0xff;
427 desc[2] = (len >> 0) & 0xff;
428
429 desc[1] = 0x00;
430 desc[0] = 0x21; /* tran, valid */
431
432 desc += 8;
433
434 /*
435 * If this triggers then we have a calculation bug
436 * somewhere. :/
437 */
438 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
439 }
440
441 /*
442 * Add a terminating entry.
443 */
444 desc[7] = 0;
445 desc[6] = 0;
446 desc[5] = 0;
447 desc[4] = 0;
448
449 desc[3] = 0;
450 desc[2] = 0;
451
452 desc[1] = 0x00;
453 desc[0] = 0x03; /* nop, end, valid */
454
455 /*
456 * Resync align buffer as we might have changed it.
457 */
458 if (data->flags & MMC_DATA_WRITE) {
459 dma_sync_single_for_device(mmc_dev(host->mmc),
460 host->align_addr, 128 * 4, direction);
461 }
462
463 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
464 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
465 	if (dma_mapping_error(host->adma_addr))
466 goto unmap_entries;
467 BUG_ON(host->adma_addr & 0x3);
468
469 return 0;
470
471 unmap_entries:
472 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
473 data->sg_len, direction);
474 unmap_align:
475 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
476 128 * 4, direction);
477 fail:
478 return -EINVAL;
479 }
480
481 static void sdhci_adma_table_post(struct sdhci_host *host,
482 struct mmc_data *data)
483 {
484 int direction;
485
486 struct scatterlist *sg;
487 int i, size;
488 u8 *align;
489 char *buffer;
490 unsigned long flags;
491
492 if (data->flags & MMC_DATA_READ)
493 direction = DMA_FROM_DEVICE;
494 else
495 direction = DMA_TO_DEVICE;
496
497 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
498 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
499
500 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
501 128 * 4, direction);
502
503 if (data->flags & MMC_DATA_READ) {
504 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
505 data->sg_len, direction);
506
507 align = host->align_buffer;
508
509 for_each_sg(data->sg, sg, host->sg_count, i) {
510 if (sg_dma_address(sg) & 0x3) {
511 size = 4 - (sg_dma_address(sg) & 0x3);
512
513 buffer = sdhci_kmap_atomic(sg, &flags);
514 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
515 memcpy(buffer, align, size);
516 sdhci_kunmap_atomic(buffer, &flags);
517
518 align += 4;
519 }
520 }
521 }
522
523 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
524 data->sg_len, direction);
525 }
526
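/*
 * Convert the request timeout (timeout_ns plus timeout_clks) into the
 * controller's 4-bit timeout encoding, where each step doubles the number
 * of timeout clock cycles. Requests that do not fit are clamped to 0xE.
 */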
527 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
528 {
529 u8 count;
530 unsigned target_timeout, current_timeout;
531
532 /*
533 * If the host controller provides us with an incorrect timeout
534 * value, just skip the check and use 0xE. The hardware may take
535 * longer to time out, but that's much better than having a too-short
536 * timeout value.
537 */
538 if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
539 return 0xE;
540
541 /* timeout in us */
542 target_timeout = data->timeout_ns / 1000 +
543 data->timeout_clks / host->clock;
544
545 /*
546 * Figure out needed cycles.
547 * We do this in steps in order to fit inside a 32 bit int.
548 * The first step is the minimum timeout, which will have a
549 * minimum resolution of 6 bits:
550 * (1) 2^13*1000 > 2^22,
551 * (2) host->timeout_clk < 2^16
552 * =>
553 * (1) / (2) > 2^6
554 */
555 count = 0;
556 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
557 while (current_timeout < target_timeout) {
558 count++;
559 current_timeout <<= 1;
560 if (count >= 0xF)
561 break;
562 }
563
564 if (count >= 0xF) {
565 printk(KERN_WARNING "%s: Too large timeout requested!\n",
566 mmc_hostname(host->mmc));
567 count = 0xE;
568 }
569
570 return count;
571 }
572
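/*
 * Set up the controller for a data transfer: program the data timeout,
 * pick ADMA, SDMA or PIO (dropping back to PIO when a quirk or an
 * unsuitable scatterlist rules DMA out) and write the block size and
 * block count registers.
 */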
573 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
574 {
575 u8 count;
576 u8 ctrl;
577 int ret;
578
579 WARN_ON(host->data);
580
581 if (data == NULL)
582 return;
583
584 /* Sanity checks */
585 BUG_ON(data->blksz * data->blocks > 524288);
586 BUG_ON(data->blksz > host->mmc->max_blk_size);
587 BUG_ON(data->blocks > 65535);
588
589 host->data = data;
590 host->data_early = 0;
591
592 count = sdhci_calc_timeout(host, data);
593 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
594
595 if (host->flags & SDHCI_USE_DMA)
596 host->flags |= SDHCI_REQ_USE_DMA;
597
598 /*
599 * FIXME: This doesn't account for merging when mapping the
600 * scatterlist.
601 */
602 if (host->flags & SDHCI_REQ_USE_DMA) {
603 int broken, i;
604 struct scatterlist *sg;
605
606 broken = 0;
607 if (host->flags & SDHCI_USE_ADMA) {
608 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
609 broken = 1;
610 } else {
611 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
612 broken = 1;
613 }
614
615 if (unlikely(broken)) {
616 for_each_sg(data->sg, sg, data->sg_len, i) {
617 if (sg->length & 0x3) {
618 DBG("Reverting to PIO because of "
619 "transfer size (%d)\n",
620 sg->length);
621 host->flags &= ~SDHCI_REQ_USE_DMA;
622 break;
623 }
624 }
625 }
626 }
627
628 /*
629 * The assumption here being that alignment is the same after
630 * translation to device address space.
631 */
632 if (host->flags & SDHCI_REQ_USE_DMA) {
633 int broken, i;
634 struct scatterlist *sg;
635
636 broken = 0;
637 if (host->flags & SDHCI_USE_ADMA) {
638 /*
639 * As we use 3 byte chunks to work around
640 * alignment problems, we need to check this
641 * quirk.
642 */
643 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
644 broken = 1;
645 } else {
646 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
647 broken = 1;
648 }
649
650 if (unlikely(broken)) {
651 for_each_sg(data->sg, sg, data->sg_len, i) {
652 if (sg->offset & 0x3) {
653 DBG("Reverting to PIO because of "
654 "bad alignment\n");
655 host->flags &= ~SDHCI_REQ_USE_DMA;
656 break;
657 }
658 }
659 }
660 }
661
662 if (host->flags & SDHCI_REQ_USE_DMA) {
663 if (host->flags & SDHCI_USE_ADMA) {
664 ret = sdhci_adma_table_pre(host, data);
665 if (ret) {
666 /*
667 * This only happens when someone fed
668 * us an invalid request.
669 */
670 WARN_ON(1);
671 host->flags &= ~SDHCI_USE_DMA;
672 } else {
673 writel(host->adma_addr,
674 host->ioaddr + SDHCI_ADMA_ADDRESS);
675 }
676 } else {
677 int sg_cnt;
678
679 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
680 data->sg, data->sg_len,
681 (data->flags & MMC_DATA_READ) ?
682 DMA_FROM_DEVICE :
683 DMA_TO_DEVICE);
684 if (sg_cnt == 0) {
685 /*
686 * This only happens when someone fed
687 * us an invalid request.
688 */
689 WARN_ON(1);
690 host->flags &= ~SDHCI_USE_DMA;
691 } else {
692 WARN_ON(sg_cnt != 1);
693 writel(sg_dma_address(data->sg),
694 host->ioaddr + SDHCI_DMA_ADDRESS);
695 }
696 }
697 }
698
699 /*
700 * Always adjust the DMA selection as some controllers
701 * (e.g. JMicron) can't do PIO properly when the selection
702 * is ADMA.
703 */
704 if (host->version >= SDHCI_SPEC_200) {
705 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
706 ctrl &= ~SDHCI_CTRL_DMA_MASK;
707 if ((host->flags & SDHCI_REQ_USE_DMA) &&
708 (host->flags & SDHCI_USE_ADMA))
709 ctrl |= SDHCI_CTRL_ADMA32;
710 else
711 ctrl |= SDHCI_CTRL_SDMA;
712 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
713 }
714
715 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
716 host->cur_sg = data->sg;
717 host->num_sg = data->sg_len;
718
719 host->offset = 0;
720 host->remain = host->cur_sg->length;
721 }
722
723 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
724 writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
725 host->ioaddr + SDHCI_BLOCK_SIZE);
726 writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
727 }
728
729 static void sdhci_set_transfer_mode(struct sdhci_host *host,
730 struct mmc_data *data)
731 {
732 u16 mode;
733
734 if (data == NULL)
735 return;
736
737 WARN_ON(!host->data);
738
739 mode = SDHCI_TRNS_BLK_CNT_EN;
740 if (data->blocks > 1)
741 mode |= SDHCI_TRNS_MULTI;
742 if (data->flags & MMC_DATA_READ)
743 mode |= SDHCI_TRNS_READ;
744 if (host->flags & SDHCI_REQ_USE_DMA)
745 mode |= SDHCI_TRNS_DMA;
746
747 writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
748 }
749
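/*
 * Tear down a completed data transfer: unmap any DMA resources, set
 * bytes_xfered (zero on error), then either send the stop command
 * (resetting the state machines first on error) or schedule the finish
 * tasklet.
 */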
750 static void sdhci_finish_data(struct sdhci_host *host)
751 {
752 struct mmc_data *data;
753
754 BUG_ON(!host->data);
755
756 data = host->data;
757 host->data = NULL;
758
759 if (host->flags & SDHCI_REQ_USE_DMA) {
760 if (host->flags & SDHCI_USE_ADMA)
761 sdhci_adma_table_post(host, data);
762 else {
763 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
764 data->sg_len, (data->flags & MMC_DATA_READ) ?
765 DMA_FROM_DEVICE : DMA_TO_DEVICE);
766 }
767 }
768
769 /*
770 * The specification states that the block count register must
771 * be updated, but it does not specify at what point in the
772 * data flow. That makes the register entirely useless to read
773 * back so we have to assume that nothing made it to the card
774 * in the event of an error.
775 */
776 if (data->error)
777 data->bytes_xfered = 0;
778 else
779 data->bytes_xfered = data->blksz * data->blocks;
780
781 if (data->stop) {
782 /*
783 * The controller needs a reset of internal state machines
784 * upon error conditions.
785 */
786 if (data->error) {
787 sdhci_reset(host, SDHCI_RESET_CMD);
788 sdhci_reset(host, SDHCI_RESET_DATA);
789 }
790
791 sdhci_send_command(host, data->stop);
792 } else
793 tasklet_schedule(&host->finish_tasklet);
794 }
795
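/*
 * Issue a command. Waits (at most 10 ms) for the command and, where
 * needed, data inhibit bits to clear, prepares any data transfer, then
 * writes the argument and command registers. Failures are reported via
 * cmd->error and the finish tasklet.
 */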
796 static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
797 {
798 int flags;
799 u32 mask;
800 unsigned long timeout;
801
802 WARN_ON(host->cmd);
803
804 /* Wait max 10 ms */
805 timeout = 10;
806
807 mask = SDHCI_CMD_INHIBIT;
808 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
809 mask |= SDHCI_DATA_INHIBIT;
810
811 	/* We shouldn't wait for data inhibit for stop commands, even
812 though they might use busy signaling */
813 if (host->mrq->data && (cmd == host->mrq->data->stop))
814 mask &= ~SDHCI_DATA_INHIBIT;
815
816 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
817 if (timeout == 0) {
818 printk(KERN_ERR "%s: Controller never released "
819 "inhibit bit(s).\n", mmc_hostname(host->mmc));
820 sdhci_dumpregs(host);
821 cmd->error = -EIO;
822 tasklet_schedule(&host->finish_tasklet);
823 return;
824 }
825 timeout--;
826 mdelay(1);
827 }
828
829 mod_timer(&host->timer, jiffies + 10 * HZ);
830
831 host->cmd = cmd;
832
833 sdhci_prepare_data(host, cmd->data);
834
835 writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);
836
837 sdhci_set_transfer_mode(host, cmd->data);
838
839 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
840 printk(KERN_ERR "%s: Unsupported response type!\n",
841 mmc_hostname(host->mmc));
842 cmd->error = -EINVAL;
843 tasklet_schedule(&host->finish_tasklet);
844 return;
845 }
846
847 if (!(cmd->flags & MMC_RSP_PRESENT))
848 flags = SDHCI_CMD_RESP_NONE;
849 else if (cmd->flags & MMC_RSP_136)
850 flags = SDHCI_CMD_RESP_LONG;
851 else if (cmd->flags & MMC_RSP_BUSY)
852 flags = SDHCI_CMD_RESP_SHORT_BUSY;
853 else
854 flags = SDHCI_CMD_RESP_SHORT;
855
856 if (cmd->flags & MMC_RSP_CRC)
857 flags |= SDHCI_CMD_CRC;
858 if (cmd->flags & MMC_RSP_OPCODE)
859 flags |= SDHCI_CMD_INDEX;
860 if (cmd->data)
861 flags |= SDHCI_CMD_DATA;
862
863 writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
864 host->ioaddr + SDHCI_COMMAND);
865 }
866
867 static void sdhci_finish_command(struct sdhci_host *host)
868 {
869 int i;
870
871 BUG_ON(host->cmd == NULL);
872
873 if (host->cmd->flags & MMC_RSP_PRESENT) {
874 if (host->cmd->flags & MMC_RSP_136) {
875 /* CRC is stripped so we need to do some shifting. */
876 for (i = 0;i < 4;i++) {
877 host->cmd->resp[i] = readl(host->ioaddr +
878 SDHCI_RESPONSE + (3-i)*4) << 8;
879 if (i != 3)
880 host->cmd->resp[i] |=
881 readb(host->ioaddr +
882 SDHCI_RESPONSE + (3-i)*4-1);
883 }
884 } else {
885 host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
886 }
887 }
888
889 host->cmd->error = 0;
890
891 if (host->data && host->data_early)
892 sdhci_finish_data(host);
893
894 if (!host->cmd->data)
895 tasklet_schedule(&host->finish_tasklet);
896
897 host->cmd = NULL;
898 }
899
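/*
 * Program the SD clock. The divider is the smallest power of two that
 * brings the base clock at or below the requested frequency; the card
 * clock is only enabled once the internal clock reports stable (waiting
 * at most 10 ms for that to happen).
 */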
900 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
901 {
902 int div;
903 u16 clk;
904 unsigned long timeout;
905
906 if (clock == host->clock)
907 return;
908
909 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
910
911 if (clock == 0)
912 goto out;
913
914 for (div = 1;div < 256;div *= 2) {
915 if ((host->max_clk / div) <= clock)
916 break;
917 }
918 div >>= 1;
919
920 clk = div << SDHCI_DIVIDER_SHIFT;
921 clk |= SDHCI_CLOCK_INT_EN;
922 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
923
924 /* Wait max 10 ms */
925 timeout = 10;
926 while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
927 & SDHCI_CLOCK_INT_STABLE)) {
928 if (timeout == 0) {
929 printk(KERN_ERR "%s: Internal clock never "
930 "stabilised.\n", mmc_hostname(host->mmc));
931 sdhci_dumpregs(host);
932 return;
933 }
934 timeout--;
935 mdelay(1);
936 }
937
938 clk |= SDHCI_CLOCK_CARD_EN;
939 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
940
941 out:
942 host->clock = clock;
943 }
944
945 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
946 {
947 u8 pwr;
948
949 if (host->power == power)
950 return;
951
952 if (power == (unsigned short)-1) {
953 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
954 goto out;
955 }
956
957 /*
958 * Spec says that we should clear the power reg before setting
959 * a new value. Some controllers don't seem to like this though.
960 */
961 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
962 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
963
964 pwr = SDHCI_POWER_ON;
965
966 switch (1 << power) {
967 case MMC_VDD_165_195:
968 pwr |= SDHCI_POWER_180;
969 break;
970 case MMC_VDD_29_30:
971 case MMC_VDD_30_31:
972 pwr |= SDHCI_POWER_300;
973 break;
974 case MMC_VDD_32_33:
975 case MMC_VDD_33_34:
976 pwr |= SDHCI_POWER_330;
977 break;
978 default:
979 BUG();
980 }
981
982 /*
983 * At least the Marvell CaFe chip gets confused if we set the voltage
984 * and set turn on power at the same time, so set the voltage first.
985 */
986 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
987 writeb(pwr & ~SDHCI_POWER_ON,
988 host->ioaddr + SDHCI_POWER_CONTROL);
989
990 writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);
991
992 out:
993 host->power = power;
994 }
995
996 /*****************************************************************************\
997 * *
998 * MMC callbacks *
999 * *
1000 \*****************************************************************************/
1001
1002 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1003 {
1004 struct sdhci_host *host;
1005 unsigned long flags;
1006
1007 host = mmc_priv(mmc);
1008
1009 spin_lock_irqsave(&host->lock, flags);
1010
1011 WARN_ON(host->mrq != NULL);
1012
1013 #ifndef CONFIG_LEDS_CLASS
1014 sdhci_activate_led(host);
1015 #endif
1016
1017 host->mrq = mrq;
1018
1019 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
1020 || (host->flags & SDHCI_DEVICE_DEAD)) {
1021 host->mrq->cmd->error = -ENOMEDIUM;
1022 tasklet_schedule(&host->finish_tasklet);
1023 } else
1024 sdhci_send_command(host, mrq->cmd);
1025
1026 mmiowb();
1027 spin_unlock_irqrestore(&host->lock, flags);
1028 }
1029
1030 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1031 {
1032 struct sdhci_host *host;
1033 unsigned long flags;
1034 u8 ctrl;
1035
1036 host = mmc_priv(mmc);
1037
1038 spin_lock_irqsave(&host->lock, flags);
1039
1040 if (host->flags & SDHCI_DEVICE_DEAD)
1041 goto out;
1042
1043 /*
1044 * Reset the chip on each power off.
1045 * Should clear out any weird states.
1046 */
1047 if (ios->power_mode == MMC_POWER_OFF) {
1048 writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
1049 sdhci_init(host);
1050 }
1051
1052 sdhci_set_clock(host, ios->clock);
1053
1054 if (ios->power_mode == MMC_POWER_OFF)
1055 sdhci_set_power(host, -1);
1056 else
1057 sdhci_set_power(host, ios->vdd);
1058
1059 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
1060
1061 if (ios->bus_width == MMC_BUS_WIDTH_4)
1062 ctrl |= SDHCI_CTRL_4BITBUS;
1063 else
1064 ctrl &= ~SDHCI_CTRL_4BITBUS;
1065
1066 if (ios->timing == MMC_TIMING_SD_HS)
1067 ctrl |= SDHCI_CTRL_HISPD;
1068 else
1069 ctrl &= ~SDHCI_CTRL_HISPD;
1070
1071 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
1072
1073 /*
1074 * Some (ENE) controllers go apeshit on some ios operation,
1075 * signalling timeout and CRC errors even on CMD0. Resetting
1076 * it on each ios seems to solve the problem.
1077 */
1078 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1079 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1080
1081 out:
1082 mmiowb();
1083 spin_unlock_irqrestore(&host->lock, flags);
1084 }
1085
1086 static int sdhci_get_ro(struct mmc_host *mmc)
1087 {
1088 struct sdhci_host *host;
1089 unsigned long flags;
1090 int present;
1091
1092 host = mmc_priv(mmc);
1093
1094 spin_lock_irqsave(&host->lock, flags);
1095
1096 if (host->flags & SDHCI_DEVICE_DEAD)
1097 present = 0;
1098 else
1099 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
1100
1101 spin_unlock_irqrestore(&host->lock, flags);
1102
1103 return !(present & SDHCI_WRITE_PROTECT);
1104 }
1105
1106 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1107 {
1108 struct sdhci_host *host;
1109 unsigned long flags;
1110 u32 ier;
1111
1112 host = mmc_priv(mmc);
1113
1114 spin_lock_irqsave(&host->lock, flags);
1115
1116 if (host->flags & SDHCI_DEVICE_DEAD)
1117 goto out;
1118
1119 ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
1120
1121 ier &= ~SDHCI_INT_CARD_INT;
1122 if (enable)
1123 ier |= SDHCI_INT_CARD_INT;
1124
1125 writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
1126 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
1127
1128 out:
1129 mmiowb();
1130
1131 spin_unlock_irqrestore(&host->lock, flags);
1132 }
1133
1134 static const struct mmc_host_ops sdhci_ops = {
1135 .request = sdhci_request,
1136 .set_ios = sdhci_set_ios,
1137 .get_ro = sdhci_get_ro,
1138 .enable_sdio_irq = sdhci_enable_sdio_irq,
1139 };
1140
1141 /*****************************************************************************\
1142 * *
1143 * Tasklets *
1144 * *
1145 \*****************************************************************************/
1146
1147 static void sdhci_tasklet_card(unsigned long param)
1148 {
1149 struct sdhci_host *host;
1150 unsigned long flags;
1151
1152 host = (struct sdhci_host*)param;
1153
1154 spin_lock_irqsave(&host->lock, flags);
1155
1156 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
1157 if (host->mrq) {
1158 printk(KERN_ERR "%s: Card removed during transfer!\n",
1159 mmc_hostname(host->mmc));
1160 printk(KERN_ERR "%s: Resetting controller.\n",
1161 mmc_hostname(host->mmc));
1162
1163 sdhci_reset(host, SDHCI_RESET_CMD);
1164 sdhci_reset(host, SDHCI_RESET_DATA);
1165
1166 host->mrq->cmd->error = -ENOMEDIUM;
1167 tasklet_schedule(&host->finish_tasklet);
1168 }
1169 }
1170
1171 spin_unlock_irqrestore(&host->lock, flags);
1172
1173 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
1174 }
1175
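/*
 * Completion tasklet: runs once a request has finished or failed, resets
 * the command and data state machines when an error occurred (or the
 * RESET_AFTER_REQUEST quirk demands it) and hands the request back to
 * the MMC core.
 */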
1176 static void sdhci_tasklet_finish(unsigned long param)
1177 {
1178 struct sdhci_host *host;
1179 unsigned long flags;
1180 struct mmc_request *mrq;
1181
1182 host = (struct sdhci_host*)param;
1183
1184 spin_lock_irqsave(&host->lock, flags);
1185
1186 del_timer(&host->timer);
1187
1188 mrq = host->mrq;
1189
1190 /*
1191 * The controller needs a reset of internal state machines
1192 * upon error conditions.
1193 */
1194 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1195 (mrq->cmd->error ||
1196 (mrq->data && (mrq->data->error ||
1197 (mrq->data->stop && mrq->data->stop->error))) ||
1198 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1199
1200 /* Some controllers need this kick or reset won't work here */
1201 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1202 unsigned int clock;
1203
1204 /* This is to force an update */
1205 clock = host->clock;
1206 host->clock = 0;
1207 sdhci_set_clock(host, clock);
1208 }
1209
1210 /* Spec says we should do both at the same time, but Ricoh
1211 controllers do not like that. */
1212 sdhci_reset(host, SDHCI_RESET_CMD);
1213 sdhci_reset(host, SDHCI_RESET_DATA);
1214 }
1215
1216 host->mrq = NULL;
1217 host->cmd = NULL;
1218 host->data = NULL;
1219
1220 #ifndef CONFIG_LEDS_CLASS
1221 sdhci_deactivate_led(host);
1222 #endif
1223
1224 mmiowb();
1225 spin_unlock_irqrestore(&host->lock, flags);
1226
1227 mmc_request_done(host->mmc, mrq);
1228 }
1229
1230 static void sdhci_timeout_timer(unsigned long data)
1231 {
1232 struct sdhci_host *host;
1233 unsigned long flags;
1234
1235 host = (struct sdhci_host*)data;
1236
1237 spin_lock_irqsave(&host->lock, flags);
1238
1239 if (host->mrq) {
1240 printk(KERN_ERR "%s: Timeout waiting for hardware "
1241 "interrupt.\n", mmc_hostname(host->mmc));
1242 sdhci_dumpregs(host);
1243
1244 if (host->data) {
1245 host->data->error = -ETIMEDOUT;
1246 sdhci_finish_data(host);
1247 } else {
1248 if (host->cmd)
1249 host->cmd->error = -ETIMEDOUT;
1250 else
1251 host->mrq->cmd->error = -ETIMEDOUT;
1252
1253 tasklet_schedule(&host->finish_tasklet);
1254 }
1255 }
1256
1257 mmiowb();
1258 spin_unlock_irqrestore(&host->lock, flags);
1259 }
1260
1261 /*****************************************************************************\
1262 * *
1263 * Interrupt handling *
1264 * *
1265 \*****************************************************************************/
1266
1267 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1268 {
1269 BUG_ON(intmask == 0);
1270
1271 if (!host->cmd) {
1272 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
1273 "though no command operation was in progress.\n",
1274 mmc_hostname(host->mmc), (unsigned)intmask);
1275 sdhci_dumpregs(host);
1276 return;
1277 }
1278
1279 if (intmask & SDHCI_INT_TIMEOUT)
1280 host->cmd->error = -ETIMEDOUT;
1281 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1282 SDHCI_INT_INDEX))
1283 host->cmd->error = -EILSEQ;
1284
1285 if (host->cmd->error)
1286 tasklet_schedule(&host->finish_tasklet);
1287 else if (intmask & SDHCI_INT_RESPONSE)
1288 sdhci_finish_command(host);
1289 }
1290
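/*
 * Handle data interrupts: record timeout/CRC/ADMA errors, service PIO
 * transfers, rewrite the DMA address register at a boundary to restart
 * the transfer, and defer completion via data_early when the command
 * has not finished yet.
 */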
1291 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1292 {
1293 BUG_ON(intmask == 0);
1294
1295 if (!host->data) {
1296 /*
1297 * A data end interrupt is sent together with the response
1298 * for the stop command.
1299 */
1300 if (intmask & SDHCI_INT_DATA_END)
1301 return;
1302
1303 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1304 "though no data operation was in progress.\n",
1305 mmc_hostname(host->mmc), (unsigned)intmask);
1306 sdhci_dumpregs(host);
1307
1308 return;
1309 }
1310
1311 if (intmask & SDHCI_INT_DATA_TIMEOUT)
1312 host->data->error = -ETIMEDOUT;
1313 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1314 host->data->error = -EILSEQ;
1315 else if (intmask & SDHCI_INT_ADMA_ERROR)
1316 host->data->error = -EIO;
1317
1318 if (host->data->error)
1319 sdhci_finish_data(host);
1320 else {
1321 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
1322 sdhci_transfer_pio(host);
1323
1324 /*
1325 * We currently don't do anything fancy with DMA
1326 * boundaries, but as we can't disable the feature
1327 * we need to at least restart the transfer.
1328 */
1329 if (intmask & SDHCI_INT_DMA_END)
1330 writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
1331 host->ioaddr + SDHCI_DMA_ADDRESS);
1332
1333 if (intmask & SDHCI_INT_DATA_END) {
1334 if (host->cmd) {
1335 /*
1336 * Data managed to finish before the
1337 * command completed. Make sure we do
1338 * things in the proper order.
1339 */
1340 host->data_early = 1;
1341 } else {
1342 sdhci_finish_data(host);
1343 }
1344 }
1345 }
1346 }
1347
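/*
 * Top-level interrupt handler. Acknowledges the pending status bits and
 * dispatches card-detect, command and data interrupts to their handlers.
 * An SDIO card interrupt is signalled to the MMC core only after the
 * host lock has been dropped.
 */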
1348 static irqreturn_t sdhci_irq(int irq, void *dev_id)
1349 {
1350 irqreturn_t result;
1351 struct sdhci_host* host = dev_id;
1352 u32 intmask;
1353 int cardint = 0;
1354
1355 spin_lock(&host->lock);
1356
1357 intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
1358
1359 if (!intmask || intmask == 0xffffffff) {
1360 result = IRQ_NONE;
1361 goto out;
1362 }
1363
1364 DBG("*** %s got interrupt: 0x%08x\n",
1365 mmc_hostname(host->mmc), intmask);
1366
1367 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
1368 writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
1369 host->ioaddr + SDHCI_INT_STATUS);
1370 tasklet_schedule(&host->card_tasklet);
1371 }
1372
1373 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
1374
1375 if (intmask & SDHCI_INT_CMD_MASK) {
1376 writel(intmask & SDHCI_INT_CMD_MASK,
1377 host->ioaddr + SDHCI_INT_STATUS);
1378 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1379 }
1380
1381 if (intmask & SDHCI_INT_DATA_MASK) {
1382 writel(intmask & SDHCI_INT_DATA_MASK,
1383 host->ioaddr + SDHCI_INT_STATUS);
1384 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1385 }
1386
1387 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1388
1389 intmask &= ~SDHCI_INT_ERROR;
1390
1391 if (intmask & SDHCI_INT_BUS_POWER) {
1392 printk(KERN_ERR "%s: Card is consuming too much power!\n",
1393 mmc_hostname(host->mmc));
1394 writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
1395 }
1396
1397 intmask &= ~SDHCI_INT_BUS_POWER;
1398
1399 if (intmask & SDHCI_INT_CARD_INT)
1400 cardint = 1;
1401
1402 intmask &= ~SDHCI_INT_CARD_INT;
1403
1404 if (intmask) {
1405 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1406 mmc_hostname(host->mmc), intmask);
1407 sdhci_dumpregs(host);
1408
1409 writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
1410 }
1411
1412 result = IRQ_HANDLED;
1413
1414 mmiowb();
1415 out:
1416 spin_unlock(&host->lock);
1417
1418 /*
1419 * We have to delay this as it calls back into the driver.
1420 */
1421 if (cardint)
1422 mmc_signal_sdio_irq(host->mmc);
1423
1424 return result;
1425 }
1426
1427 /*****************************************************************************\
1428 * *
1429 * Suspend/resume *
1430 * *
1431 \*****************************************************************************/
1432
1433 #ifdef CONFIG_PM
1434
1435 int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1436 {
1437 int ret;
1438
1439 ret = mmc_suspend_host(host->mmc, state);
1440 if (ret)
1441 return ret;
1442
1443 free_irq(host->irq, host);
1444
1445 return 0;
1446 }
1447
1448 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1449
1450 int sdhci_resume_host(struct sdhci_host *host)
1451 {
1452 int ret;
1453
1454 if (host->flags & SDHCI_USE_DMA) {
1455 if (host->ops->enable_dma)
1456 host->ops->enable_dma(host);
1457 }
1458
1459 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1460 mmc_hostname(host->mmc), host);
1461 if (ret)
1462 return ret;
1463
1464 sdhci_init(host);
1465 mmiowb();
1466
1467 ret = mmc_resume_host(host->mmc);
1468 if (ret)
1469 return ret;
1470
1471 return 0;
1472 }
1473
1474 EXPORT_SYMBOL_GPL(sdhci_resume_host);
1475
1476 #endif /* CONFIG_PM */
1477
1478 /*****************************************************************************\
1479 * *
1480 * Device allocation/registration *
1481 * *
1482 \*****************************************************************************/
1483
1484 struct sdhci_host *sdhci_alloc_host(struct device *dev,
1485 size_t priv_size)
1486 {
1487 struct mmc_host *mmc;
1488 struct sdhci_host *host;
1489
1490 WARN_ON(dev == NULL);
1491
1492 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1493 if (!mmc)
1494 return ERR_PTR(-ENOMEM);
1495
1496 host = mmc_priv(mmc);
1497 host->mmc = mmc;
1498
1499 return host;
1500 }
1501
1502 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1503
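/*
 * Register a host with the MMC core: reset the controller, read the
 * capabilities register to choose between ADMA, SDMA and PIO, derive the
 * clock and voltage ranges, and set up the tasklets, timeout timer and
 * interrupt handler before adding the host.
 */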
1504 int sdhci_add_host(struct sdhci_host *host)
1505 {
1506 struct mmc_host *mmc;
1507 unsigned int caps;
1508 int ret;
1509
1510 WARN_ON(host == NULL);
1511 if (host == NULL)
1512 return -EINVAL;
1513
1514 mmc = host->mmc;
1515
1516 if (debug_quirks)
1517 host->quirks = debug_quirks;
1518
1519 sdhci_reset(host, SDHCI_RESET_ALL);
1520
1521 host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
1522 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1523 >> SDHCI_SPEC_VER_SHIFT;
1524 if (host->version > SDHCI_SPEC_200) {
1525 printk(KERN_ERR "%s: Unknown controller version (%d). "
1526 "You may experience problems.\n", mmc_hostname(mmc),
1527 host->version);
1528 }
1529
1530 caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
1531
1532 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1533 host->flags |= SDHCI_USE_DMA;
1534 else if (!(caps & SDHCI_CAN_DO_DMA))
1535 DBG("Controller doesn't have DMA capability\n");
1536 else
1537 host->flags |= SDHCI_USE_DMA;
1538
1539 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1540 (host->flags & SDHCI_USE_DMA)) {
1541 DBG("Disabling DMA as it is marked broken\n");
1542 host->flags &= ~SDHCI_USE_DMA;
1543 }
1544
1545 if (host->flags & SDHCI_USE_DMA) {
1546 if ((host->version >= SDHCI_SPEC_200) &&
1547 (caps & SDHCI_CAN_DO_ADMA2))
1548 host->flags |= SDHCI_USE_ADMA;
1549 }
1550
1551 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1552 (host->flags & SDHCI_USE_ADMA)) {
1553 DBG("Disabling ADMA as it is marked broken\n");
1554 host->flags &= ~SDHCI_USE_ADMA;
1555 }
1556
1557 if (host->flags & SDHCI_USE_DMA) {
1558 if (host->ops->enable_dma) {
1559 if (host->ops->enable_dma(host)) {
1560 printk(KERN_WARNING "%s: No suitable DMA "
1561 "available. Falling back to PIO.\n",
1562 mmc_hostname(mmc));
1563 host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
1564 }
1565 }
1566 }
1567
1568 if (host->flags & SDHCI_USE_ADMA) {
1569 /*
1570 * We need to allocate descriptors for all sg entries
1571 * (128) and potentially one alignment transfer for
1572 * each of those entries.
1573 */
1574 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1575 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1576 if (!host->adma_desc || !host->align_buffer) {
1577 kfree(host->adma_desc);
1578 kfree(host->align_buffer);
1579 printk(KERN_WARNING "%s: Unable to allocate ADMA "
1580 "buffers. Falling back to standard DMA.\n",
1581 mmc_hostname(mmc));
1582 host->flags &= ~SDHCI_USE_ADMA;
1583 }
1584 }
1585
1586 /* XXX: Hack to get MMC layer to avoid highmem */
1587 if (!(host->flags & SDHCI_USE_DMA))
1588 mmc_dev(host->mmc)->dma_mask = NULL;
1589
1590 host->max_clk =
1591 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1592 if (host->max_clk == 0) {
1593 printk(KERN_ERR "%s: Hardware doesn't specify base clock "
1594 "frequency.\n", mmc_hostname(mmc));
1595 return -ENODEV;
1596 }
1597 host->max_clk *= 1000000;
1598
1599 host->timeout_clk =
1600 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1601 if (host->timeout_clk == 0) {
1602 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
1603 "frequency.\n", mmc_hostname(mmc));
1604 return -ENODEV;
1605 }
1606 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1607 host->timeout_clk *= 1000;
1608
1609 /*
1610 * Set host parameters.
1611 */
1612 mmc->ops = &sdhci_ops;
1613 mmc->f_min = host->max_clk / 256;
1614 mmc->f_max = host->max_clk;
1615 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1616
1617 if (caps & SDHCI_CAN_DO_HISPD)
1618 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1619
1620 mmc->ocr_avail = 0;
1621 if (caps & SDHCI_CAN_VDD_330)
1622 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1623 if (caps & SDHCI_CAN_VDD_300)
1624 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1625 if (caps & SDHCI_CAN_VDD_180)
1626 mmc->ocr_avail |= MMC_VDD_165_195;
1627
1628 if (mmc->ocr_avail == 0) {
1629 printk(KERN_ERR "%s: Hardware doesn't report any "
1630 "support voltages.\n", mmc_hostname(mmc));
1631 return -ENODEV;
1632 }
1633
1634 spin_lock_init(&host->lock);
1635
1636 /*
1637 	 * Maximum number of segments. Depends on whether the hardware
1638 * can do scatter/gather or not.
1639 */
1640 if (host->flags & SDHCI_USE_ADMA)
1641 mmc->max_hw_segs = 128;
1642 else if (host->flags & SDHCI_USE_DMA)
1643 mmc->max_hw_segs = 1;
1644 else /* PIO */
1645 mmc->max_hw_segs = 128;
1646 mmc->max_phys_segs = 128;
1647
1648 /*
1649 * Maximum number of sectors in one transfer. Limited by DMA boundary
1650 * size (512KiB).
1651 */
1652 mmc->max_req_size = 524288;
1653
1654 /*
1655 * Maximum segment size. Could be one segment with the maximum number
1656 * of bytes. When doing hardware scatter/gather, each entry cannot
1657 * be larger than 64 KiB though.
1658 */
1659 if (host->flags & SDHCI_USE_ADMA)
1660 mmc->max_seg_size = 65536;
1661 else
1662 mmc->max_seg_size = mmc->max_req_size;
1663
1664 /*
1665 * Maximum block size. This varies from controller to controller and
1666 * is specified in the capabilities register.
1667 */
1668 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1669 if (mmc->max_blk_size >= 3) {
1670 printk(KERN_WARNING "%s: Invalid maximum block size, "
1671 "assuming 512 bytes\n", mmc_hostname(mmc));
1672 mmc->max_blk_size = 512;
1673 } else
1674 mmc->max_blk_size = 512 << mmc->max_blk_size;
1675
1676 /*
1677 * Maximum block count.
1678 */
1679 mmc->max_blk_count = 65535;
1680
1681 /*
1682 * Init tasklets.
1683 */
1684 tasklet_init(&host->card_tasklet,
1685 sdhci_tasklet_card, (unsigned long)host);
1686 tasklet_init(&host->finish_tasklet,
1687 sdhci_tasklet_finish, (unsigned long)host);
1688
1689 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
1690
1691 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1692 mmc_hostname(mmc), host);
1693 if (ret)
1694 goto untasklet;
1695
1696 sdhci_init(host);
1697
1698 #ifdef CONFIG_MMC_DEBUG
1699 sdhci_dumpregs(host);
1700 #endif
1701
1702 #ifdef CONFIG_LEDS_CLASS
1703 host->led.name = mmc_hostname(mmc);
1704 host->led.brightness = LED_OFF;
1705 host->led.default_trigger = mmc_hostname(mmc);
1706 host->led.brightness_set = sdhci_led_control;
1707
1708 ret = led_classdev_register(mmc_dev(mmc), &host->led);
1709 if (ret)
1710 goto reset;
1711 #endif
1712
1713 mmiowb();
1714
1715 mmc_add_host(mmc);
1716
1717 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
1718 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
1719 (host->flags & SDHCI_USE_ADMA)?"A":"",
1720 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
1721
1722 return 0;
1723
1724 #ifdef CONFIG_LEDS_CLASS
1725 reset:
1726 sdhci_reset(host, SDHCI_RESET_ALL);
1727 free_irq(host->irq, host);
1728 #endif
1729 untasklet:
1730 tasklet_kill(&host->card_tasklet);
1731 tasklet_kill(&host->finish_tasklet);
1732
1733 return ret;
1734 }
1735
1736 EXPORT_SYMBOL_GPL(sdhci_add_host);
1737
1738 void sdhci_remove_host(struct sdhci_host *host, int dead)
1739 {
1740 unsigned long flags;
1741
1742 if (dead) {
1743 spin_lock_irqsave(&host->lock, flags);
1744
1745 host->flags |= SDHCI_DEVICE_DEAD;
1746
1747 if (host->mrq) {
1748 printk(KERN_ERR "%s: Controller removed during "
1749 " transfer!\n", mmc_hostname(host->mmc));
1750
1751 host->mrq->cmd->error = -ENOMEDIUM;
1752 tasklet_schedule(&host->finish_tasklet);
1753 }
1754
1755 spin_unlock_irqrestore(&host->lock, flags);
1756 }
1757
1758 mmc_remove_host(host->mmc);
1759
1760 #ifdef CONFIG_LEDS_CLASS
1761 led_classdev_unregister(&host->led);
1762 #endif
1763
1764 if (!dead)
1765 sdhci_reset(host, SDHCI_RESET_ALL);
1766
1767 free_irq(host->irq, host);
1768
1769 del_timer_sync(&host->timer);
1770
1771 tasklet_kill(&host->card_tasklet);
1772 tasklet_kill(&host->finish_tasklet);
1773
1774 kfree(host->adma_desc);
1775 kfree(host->align_buffer);
1776
1777 host->adma_desc = NULL;
1778 host->align_buffer = NULL;
1779 }
1780
1781 EXPORT_SYMBOL_GPL(sdhci_remove_host);
1782
1783 void sdhci_free_host(struct sdhci_host *host)
1784 {
1785 mmc_free_host(host->mmc);
1786 }
1787
1788 EXPORT_SYMBOL_GPL(sdhci_free_host);
1789
1790 /*****************************************************************************\
1791 * *
1792 * Driver init/exit *
1793 * *
1794 \*****************************************************************************/
1795
1796 static int __init sdhci_drv_init(void)
1797 {
1798 printk(KERN_INFO DRIVER_NAME
1799 ": Secure Digital Host Controller Interface driver\n");
1800 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1801
1802 return 0;
1803 }
1804
1805 static void __exit sdhci_drv_exit(void)
1806 {
1807 }
1808
1809 module_init(sdhci_drv_init);
1810 module_exit(sdhci_drv_exit);
1811
1812 module_param(debug_quirks, uint, 0444);
1813
1814 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
1815 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
1816 MODULE_LICENSE("GPL");
1817
1818 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");