drivers/mmc/host/sdhci.c
d129bceb 1/*
70f10482 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
d129bceb 3 *
b69c9058 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * Thanks to the following companies for their support:
12 *
13 * - JMicron (hardware and technical support)
14 */
15
16#include <linux/delay.h>
17#include <linux/highmem.h>
b8c86fc5 18#include <linux/io.h>
d129bceb 19#include <linux/dma-mapping.h>
5a0e3ad6 20#include <linux/slab.h>
11763609 21#include <linux/scatterlist.h>
9bea3c85 22#include <linux/regulator/consumer.h>
d129bceb 23
24#include <linux/leds.h>
25
22113efd 26#include <linux/mmc/mmc.h>
d129bceb 27#include <linux/mmc/host.h>
d129bceb 28
29#include "sdhci.h"
30
31#define DRIVER_NAME "sdhci"
d129bceb 32
d129bceb 33#define DBG(f, x...) \
c6563178 34 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
d129bceb 35
36#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
37 defined(CONFIG_MMC_SDHCI_MODULE))
38#define SDHCI_USE_LEDS_CLASS
39#endif
40
df673b22 41static unsigned int debug_quirks = 0;
67435274 42
43static void sdhci_finish_data(struct sdhci_host *);
44
45static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
46static void sdhci_finish_command(struct sdhci_host *);
47
48static void sdhci_dumpregs(struct sdhci_host *host)
49{
50 printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
51 mmc_hostname(host->mmc));
52
53 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
54 sdhci_readl(host, SDHCI_DMA_ADDRESS),
55 sdhci_readw(host, SDHCI_HOST_VERSION));
d129bceb 56 printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
57 sdhci_readw(host, SDHCI_BLOCK_SIZE),
58 sdhci_readw(host, SDHCI_BLOCK_COUNT));
d129bceb 59 printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
60 sdhci_readl(host, SDHCI_ARGUMENT),
61 sdhci_readw(host, SDHCI_TRANSFER_MODE));
d129bceb 62 printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
63 sdhci_readl(host, SDHCI_PRESENT_STATE),
64 sdhci_readb(host, SDHCI_HOST_CONTROL));
d129bceb 65 printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
66 sdhci_readb(host, SDHCI_POWER_CONTROL),
67 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
d129bceb 68 printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
69 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
70 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
d129bceb 71 printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
72 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
73 sdhci_readl(host, SDHCI_INT_STATUS));
d129bceb 74 printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
75 sdhci_readl(host, SDHCI_INT_ENABLE),
76 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
d129bceb 77 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
78 sdhci_readw(host, SDHCI_ACMD12_ERR),
79 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
e8120ad1 80 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
4e4141a5 81 sdhci_readl(host, SDHCI_CAPABILITIES),
82 sdhci_readl(host, SDHCI_CAPABILITIES_1));
83 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
84 sdhci_readw(host, SDHCI_COMMAND),
4e4141a5 85 sdhci_readl(host, SDHCI_MAX_CURRENT));
d129bceb 86
87 if (host->flags & SDHCI_USE_ADMA)
88 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
89 readl(host->ioaddr + SDHCI_ADMA_ERROR),
90 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
91
92 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
93}
94
95/*****************************************************************************\
96 * *
97 * Low level functions *
98 * *
99\*****************************************************************************/
100
101static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
102{
103 u32 ier;
104
105 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
106 ier &= ~clear;
107 ier |= set;
108 sdhci_writel(host, ier, SDHCI_INT_ENABLE);
109 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
110}
111
112static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
113{
114 sdhci_clear_set_irqs(host, 0, irqs);
115}
116
117static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
118{
119 sdhci_clear_set_irqs(host, irqs, 0);
120}
121
122static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
123{
124 u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
125
126 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
127 return;
128
129 if (enable)
130 sdhci_unmask_irqs(host, irqs);
131 else
132 sdhci_mask_irqs(host, irqs);
133}
134
135static void sdhci_enable_card_detection(struct sdhci_host *host)
136{
137 sdhci_set_card_detection(host, true);
138}
139
140static void sdhci_disable_card_detection(struct sdhci_host *host)
141{
142 sdhci_set_card_detection(host, false);
143}
144
145static void sdhci_reset(struct sdhci_host *host, u8 mask)
146{
e16514d8 147 unsigned long timeout;
063a9dbb 148 u32 uninitialized_var(ier);
e16514d8 149
b8c86fc5 150 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
4e4141a5 151 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
152 SDHCI_CARD_PRESENT))
153 return;
154 }
155
156 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
157 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
158
159 if (host->ops->platform_reset_enter)
160 host->ops->platform_reset_enter(host, mask);
161
4e4141a5 162 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
d129bceb 163
e16514d8 164 if (mask & SDHCI_RESET_ALL)
165 host->clock = 0;
166
167 /* Wait max 100 ms */
168 timeout = 100;
169
170 /* hw clears the bit when it's done */
4e4141a5 171 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
e16514d8 172 if (timeout == 0) {
acf1da45 173 printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
174 mmc_hostname(host->mmc), (int)mask);
175 sdhci_dumpregs(host);
176 return;
177 }
178 timeout--;
179 mdelay(1);
d129bceb 180 }
063a9dbb 181
182 if (host->ops->platform_reset_exit)
183 host->ops->platform_reset_exit(host, mask);
184
185 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
186 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
187}
188
189static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
190
191static void sdhci_init(struct sdhci_host *host, int soft)
d129bceb 192{
193 if (soft)
194 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
195 else
196 sdhci_reset(host, SDHCI_RESET_ALL);
d129bceb 197
198 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
199 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
200 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
201 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
6aa943ab 202 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
203
204 if (soft) {
205 /* force clock reconfiguration */
206 host->clock = 0;
207 sdhci_set_ios(host->mmc, &host->mmc->ios);
208 }
7260cf5e 209}
d129bceb 210
211static void sdhci_reinit(struct sdhci_host *host)
212{
2f4cbb3d 213 sdhci_init(host, 0);
7260cf5e 214 sdhci_enable_card_detection(host);
215}
216
217static void sdhci_activate_led(struct sdhci_host *host)
218{
219 u8 ctrl;
220
4e4141a5 221 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 222 ctrl |= SDHCI_CTRL_LED;
4e4141a5 223 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
224}
225
226static void sdhci_deactivate_led(struct sdhci_host *host)
227{
228 u8 ctrl;
229
4e4141a5 230 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
d129bceb 231 ctrl &= ~SDHCI_CTRL_LED;
4e4141a5 232 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
233}
234
f9134319 235#ifdef SDHCI_USE_LEDS_CLASS
236static void sdhci_led_control(struct led_classdev *led,
237 enum led_brightness brightness)
238{
239 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
240 unsigned long flags;
241
242 spin_lock_irqsave(&host->lock, flags);
243
244 if (brightness == LED_OFF)
245 sdhci_deactivate_led(host);
246 else
247 sdhci_activate_led(host);
248
249 spin_unlock_irqrestore(&host->lock, flags);
250}
251#endif
252
253/*****************************************************************************\
254 * *
255 * Core functions *
256 * *
257\*****************************************************************************/
258
a406f5a3 259static void sdhci_read_block_pio(struct sdhci_host *host)
d129bceb 260{
261 unsigned long flags;
262 size_t blksize, len, chunk;
7244b85b 263 u32 uninitialized_var(scratch);
7659150c 264 u8 *buf;
d129bceb 265
a406f5a3 266 DBG("PIO reading\n");
d129bceb 267
a406f5a3 268 blksize = host->data->blksz;
7659150c 269 chunk = 0;
d129bceb 270
7659150c 271 local_irq_save(flags);
d129bceb 272
a406f5a3 273 while (blksize) {
274 if (!sg_miter_next(&host->sg_miter))
275 BUG();
d129bceb 276
7659150c 277 len = min(host->sg_miter.length, blksize);
d129bceb 278
279 blksize -= len;
280 host->sg_miter.consumed = len;
14d836e7 281
7659150c 282 buf = host->sg_miter.addr;
d129bceb 283
284 while (len) {
285 if (chunk == 0) {
4e4141a5 286 scratch = sdhci_readl(host, SDHCI_BUFFER);
7659150c 287 chunk = 4;
a406f5a3 288 }
289
290 *buf = scratch & 0xFF;
291
292 buf++;
293 scratch >>= 8;
294 chunk--;
295 len--;
d129bceb 296 }
a406f5a3 297 }
298
299 sg_miter_stop(&host->sg_miter);
300
301 local_irq_restore(flags);
a406f5a3 302}
d129bceb 303
304static void sdhci_write_block_pio(struct sdhci_host *host)
305{
306 unsigned long flags;
307 size_t blksize, len, chunk;
308 u32 scratch;
309 u8 *buf;
d129bceb 310
311 DBG("PIO writing\n");
312
313 blksize = host->data->blksz;
314 chunk = 0;
315 scratch = 0;
d129bceb 316
7659150c 317 local_irq_save(flags);
d129bceb 318
a406f5a3 319 while (blksize) {
320 if (!sg_miter_next(&host->sg_miter))
321 BUG();
a406f5a3 322
323 len = min(host->sg_miter.length, blksize);
324
325 blksize -= len;
326 host->sg_miter.consumed = len;
327
328 buf = host->sg_miter.addr;
d129bceb 329
330 while (len) {
331 scratch |= (u32)*buf << (chunk * 8);
332
333 buf++;
334 chunk++;
335 len--;
336
337 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
4e4141a5 338 sdhci_writel(host, scratch, SDHCI_BUFFER);
339 chunk = 0;
340 scratch = 0;
d129bceb 341 }
342 }
343 }
344
345 sg_miter_stop(&host->sg_miter);
346
347 local_irq_restore(flags);
348}
349
350static void sdhci_transfer_pio(struct sdhci_host *host)
351{
352 u32 mask;
353
354 BUG_ON(!host->data);
355
7659150c 356 if (host->blocks == 0)
357 return;
358
359 if (host->data->flags & MMC_DATA_READ)
360 mask = SDHCI_DATA_AVAILABLE;
361 else
362 mask = SDHCI_SPACE_AVAILABLE;
363
364 /*
365 * Some controllers (JMicron JMB38x) mess up the buffer bits
366 * for transfers < 4 bytes. As long as it is just one block,
367 * we can ignore the bits.
368 */
369 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
370 (host->data->blocks == 1))
371 mask = ~0;
372
4e4141a5 373 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
374 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
375 udelay(100);
376
377 if (host->data->flags & MMC_DATA_READ)
378 sdhci_read_block_pio(host);
379 else
380 sdhci_write_block_pio(host);
d129bceb 381
382 host->blocks--;
383 if (host->blocks == 0)
a406f5a3 384 break;
a406f5a3 385 }
d129bceb 386
a406f5a3 387 DBG("PIO transfer complete.\n");
388}
389
390static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
391{
392 local_irq_save(*flags);
393 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
394}
395
396static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
397{
398 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
399 local_irq_restore(*flags);
400}
401
402static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
403{
404 __le32 *dataddr = (__le32 __force *)(desc + 4);
405 __le16 *cmdlen = (__le16 __force *)desc;
118cd17d 406
407 /* SDHCI specification says ADMA descriptors should be 4 byte
408 * aligned, so using 16 or 32bit operations should be safe. */
118cd17d 409
410 cmdlen[0] = cpu_to_le16(cmd);
411 cmdlen[1] = cpu_to_le16(len);
412
413 dataddr[0] = cpu_to_le32(addr);
414}
415
8f1934ce 416static int sdhci_adma_table_pre(struct sdhci_host *host,
417 struct mmc_data *data)
418{
419 int direction;
420
421 u8 *desc;
422 u8 *align;
423 dma_addr_t addr;
424 dma_addr_t align_addr;
425 int len, offset;
426
427 struct scatterlist *sg;
428 int i;
429 char *buffer;
430 unsigned long flags;
431
432 /*
433 * The spec does not specify endianness of descriptor table.
434 * We currently guess that it is LE.
435 */
436
437 if (data->flags & MMC_DATA_READ)
438 direction = DMA_FROM_DEVICE;
439 else
440 direction = DMA_TO_DEVICE;
441
442 /*
443 * The ADMA descriptor table is mapped further down as we
444 * need to fill it with data first.
445 */
446
447 host->align_addr = dma_map_single(mmc_dev(host->mmc),
448 host->align_buffer, 128 * 4, direction);
8d8bb39b 449 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
8f1934ce 450 goto fail;
451 BUG_ON(host->align_addr & 0x3);
452
453 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
454 data->sg, data->sg_len, direction);
455 if (host->sg_count == 0)
456 goto unmap_align;
457
458 desc = host->adma_desc;
459 align = host->align_buffer;
460
461 align_addr = host->align_addr;
462
463 for_each_sg(data->sg, sg, host->sg_count, i) {
464 addr = sg_dma_address(sg);
465 len = sg_dma_len(sg);
466
467 /*
468 * The SDHCI specification states that ADMA
469 * addresses must be 32-bit aligned. If they
470 * aren't, then we use a bounce buffer for
471 * the (up to three) bytes that screw up the
472 * alignment.
473 */
474 offset = (4 - (addr & 0x3)) & 0x3;
475 if (offset) {
476 if (data->flags & MMC_DATA_WRITE) {
477 buffer = sdhci_kmap_atomic(sg, &flags);
6cefd05f 478 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
479 memcpy(align, buffer, offset);
480 sdhci_kunmap_atomic(buffer, &flags);
481 }
482
483 /* tran, valid */
484 sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
485
486 BUG_ON(offset > 65536);
487
488 align += 4;
489 align_addr += 4;
490
491 desc += 8;
492
493 addr += offset;
494 len -= offset;
495 }
496
497 BUG_ON(len > 65536);
498
499 /* tran, valid */
500 sdhci_set_adma_desc(desc, addr, len, 0x21);
501 desc += 8;
502
503 /*
504 * If this triggers then we have a calculation bug
505 * somewhere. :/
506 */
507 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
508 }
509
510 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
511 /*
512 * Mark the last descriptor as the terminating descriptor
513 */
514 if (desc != host->adma_desc) {
515 desc -= 8;
516 desc[0] |= 0x2; /* end */
517 }
518 } else {
519 /*
520 * Add a terminating entry.
521 */
2134a922 522
523 /* nop, end, valid */
524 sdhci_set_adma_desc(desc, 0, 0, 0x3);
525 }
526
527 /*
528 * Resync align buffer as we might have changed it.
529 */
530 if (data->flags & MMC_DATA_WRITE) {
531 dma_sync_single_for_device(mmc_dev(host->mmc),
532 host->align_addr, 128 * 4, direction);
533 }
534
535 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
536 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
980167b7 537 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
8f1934ce 538 goto unmap_entries;
2134a922 539 BUG_ON(host->adma_addr & 0x3);
540
541 return 0;
542
543unmap_entries:
544 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
545 data->sg_len, direction);
546unmap_align:
547 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
548 128 * 4, direction);
549fail:
550 return -EINVAL;
551}
552
553static void sdhci_adma_table_post(struct sdhci_host *host,
554 struct mmc_data *data)
555{
556 int direction;
557
558 struct scatterlist *sg;
559 int i, size;
560 u8 *align;
561 char *buffer;
562 unsigned long flags;
563
564 if (data->flags & MMC_DATA_READ)
565 direction = DMA_FROM_DEVICE;
566 else
567 direction = DMA_TO_DEVICE;
568
569 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
570 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
571
572 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
573 128 * 4, direction);
574
575 if (data->flags & MMC_DATA_READ) {
576 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
577 data->sg_len, direction);
578
579 align = host->align_buffer;
580
581 for_each_sg(data->sg, sg, host->sg_count, i) {
582 if (sg_dma_address(sg) & 0x3) {
583 size = 4 - (sg_dma_address(sg) & 0x3);
584
585 buffer = sdhci_kmap_atomic(sg, &flags);
6cefd05f 586 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
587 memcpy(buffer, align, size);
588 sdhci_kunmap_atomic(buffer, &flags);
589
590 align += 4;
591 }
592 }
593 }
594
595 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
596 data->sg_len, direction);
597}
598
a3c7778f 599static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
d129bceb 600{
1c8cde92 601 u8 count;
a3c7778f 602 struct mmc_data *data = cmd->data;
1c8cde92 603 unsigned target_timeout, current_timeout;
d129bceb 604
605 /*
606 * If the host controller provides us with an incorrect timeout
607 * value, just skip the check and use 0xE. The hardware may take
608 * longer to time out, but that's much better than having a too-short
609 * timeout value.
610 */
11a2f1b7 611 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
ee53ab5d 612 return 0xE;
e538fbe8 613
614 /* Unspecified timeout, assume max */
615 if (!data && !cmd->cmd_timeout_ms)
616 return 0xE;
d129bceb 617
618 /* timeout in us */
619 if (!data)
620 target_timeout = cmd->cmd_timeout_ms * 1000;
621 else
622 target_timeout = data->timeout_ns / 1000 +
623 data->timeout_clks / host->clock;
81b39802 624
625 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
626 host->timeout_clk = host->clock / 1000;
627
628 /*
629 * Figure out needed cycles.
630 * We do this in steps in order to fit inside a 32 bit int.
631 * The first step is the minimum timeout, which will have a
632 * minimum resolution of 6 bits:
633 * (1) 2^13*1000 > 2^22,
634 * (2) host->timeout_clk < 2^16
635 * =>
636 * (1) / (2) > 2^6
637 */
4b01681c 638 BUG_ON(!host->timeout_clk);
639 count = 0;
640 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
641 while (current_timeout < target_timeout) {
642 count++;
643 current_timeout <<= 1;
644 if (count >= 0xF)
645 break;
646 }
647
648 if (count >= 0xF) {
649 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
650 mmc_hostname(host->mmc), cmd->opcode);
651 count = 0xE;
652 }
653
654 return count;
655}
656
657static void sdhci_set_transfer_irqs(struct sdhci_host *host)
658{
659 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
660 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
661
662 if (host->flags & SDHCI_REQ_USE_DMA)
663 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
664 else
665 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
666}
667
a3c7778f 668static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
669{
670 u8 count;
2134a922 671 u8 ctrl;
a3c7778f 672 struct mmc_data *data = cmd->data;
8f1934ce 673 int ret;
674
675 WARN_ON(host->data);
676
677 if (data || (cmd->flags & MMC_RSP_BUSY)) {
678 count = sdhci_calc_timeout(host, cmd);
679 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
680 }
681
682 if (!data)
683 return;
684
685 /* Sanity checks */
686 BUG_ON(data->blksz * data->blocks > 524288);
687 BUG_ON(data->blksz > host->mmc->max_blk_size);
688 BUG_ON(data->blocks > 65535);
689
690 host->data = data;
691 host->data_early = 0;
f6a03cbf 692 host->data->bytes_xfered = 0;
ee53ab5d 693
a13abc7b 694 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
695 host->flags |= SDHCI_REQ_USE_DMA;
696
697 /*
698 * FIXME: This doesn't account for merging when mapping the
699 * scatterlist.
700 */
701 if (host->flags & SDHCI_REQ_USE_DMA) {
702 int broken, i;
703 struct scatterlist *sg;
704
705 broken = 0;
706 if (host->flags & SDHCI_USE_ADMA) {
707 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
708 broken = 1;
709 } else {
710 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
711 broken = 1;
712 }
713
714 if (unlikely(broken)) {
715 for_each_sg(data->sg, sg, data->sg_len, i) {
716 if (sg->length & 0x3) {
717 DBG("Reverting to PIO because of "
718 "transfer size (%d)\n",
719 sg->length);
720 host->flags &= ~SDHCI_REQ_USE_DMA;
721 break;
722 }
723 }
724 }
725 }
726
727 /*
728 * The assumption here being that alignment is the same after
729 * translation to device address space.
730 */
731 if (host->flags & SDHCI_REQ_USE_DMA) {
732 int broken, i;
733 struct scatterlist *sg;
734
735 broken = 0;
736 if (host->flags & SDHCI_USE_ADMA) {
737 /*
738 * As we use 3 byte chunks to work around
739 * alignment problems, we need to check this
740 * quirk.
741 */
742 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
743 broken = 1;
744 } else {
745 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
746 broken = 1;
747 }
748
749 if (unlikely(broken)) {
750 for_each_sg(data->sg, sg, data->sg_len, i) {
751 if (sg->offset & 0x3) {
752 DBG("Reverting to PIO because of "
753 "bad alignment\n");
754 host->flags &= ~SDHCI_REQ_USE_DMA;
755 break;
756 }
757 }
758 }
759 }
760
761 if (host->flags & SDHCI_REQ_USE_DMA) {
762 if (host->flags & SDHCI_USE_ADMA) {
763 ret = sdhci_adma_table_pre(host, data);
764 if (ret) {
765 /*
766 * This only happens when someone fed
767 * us an invalid request.
768 */
769 WARN_ON(1);
ebd6d357 770 host->flags &= ~SDHCI_REQ_USE_DMA;
8f1934ce 771 } else {
772 sdhci_writel(host, host->adma_addr,
773 SDHCI_ADMA_ADDRESS);
774 }
775 } else {
c8b3e02e 776 int sg_cnt;
8f1934ce 777
c8b3e02e 778 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
779 data->sg, data->sg_len,
780 (data->flags & MMC_DATA_READ) ?
781 DMA_FROM_DEVICE :
782 DMA_TO_DEVICE);
c8b3e02e 783 if (sg_cnt == 0) {
784 /*
785 * This only happens when someone fed
786 * us an invalid request.
787 */
788 WARN_ON(1);
ebd6d357 789 host->flags &= ~SDHCI_REQ_USE_DMA;
8f1934ce 790 } else {
719a61b4 791 WARN_ON(sg_cnt != 1);
792 sdhci_writel(host, sg_dma_address(data->sg),
793 SDHCI_DMA_ADDRESS);
794 }
795 }
796 }
797
798 /*
799 * Always adjust the DMA selection as some controllers
800 * (e.g. JMicron) can't do PIO properly when the selection
801 * is ADMA.
802 */
803 if (host->version >= SDHCI_SPEC_200) {
4e4141a5 804 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
805 ctrl &= ~SDHCI_CTRL_DMA_MASK;
806 if ((host->flags & SDHCI_REQ_USE_DMA) &&
807 (host->flags & SDHCI_USE_ADMA))
808 ctrl |= SDHCI_CTRL_ADMA32;
809 else
810 ctrl |= SDHCI_CTRL_SDMA;
4e4141a5 811 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
812 }
813
8f1934ce 814 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
815 int flags;
816
817 flags = SG_MITER_ATOMIC;
818 if (host->data->flags & MMC_DATA_READ)
819 flags |= SG_MITER_TO_SG;
820 else
821 flags |= SG_MITER_FROM_SG;
822 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
7659150c 823 host->blocks = data->blocks;
d129bceb 824 }
c7fa9963 825
826 sdhci_set_transfer_irqs(host);
827
828 /* Set the DMA boundary value and block size */
829 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
830 data->blksz), SDHCI_BLOCK_SIZE);
4e4141a5 831 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
832}
833
834static void sdhci_set_transfer_mode(struct sdhci_host *host,
835 struct mmc_data *data)
836{
837 u16 mode;
838
839 if (data == NULL)
840 return;
841
842 WARN_ON(!host->data);
843
c7fa9963 844 mode = SDHCI_TRNS_BLK_CNT_EN;
845 if (data->blocks > 1) {
846 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
847 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
848 else
849 mode |= SDHCI_TRNS_MULTI;
850 }
851 if (data->flags & MMC_DATA_READ)
852 mode |= SDHCI_TRNS_READ;
c9fddbc4 853 if (host->flags & SDHCI_REQ_USE_DMA)
854 mode |= SDHCI_TRNS_DMA;
855
4e4141a5 856 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
857}
858
859static void sdhci_finish_data(struct sdhci_host *host)
860{
861 struct mmc_data *data;
862
863 BUG_ON(!host->data);
864
865 data = host->data;
866 host->data = NULL;
867
c9fddbc4 868 if (host->flags & SDHCI_REQ_USE_DMA) {
869 if (host->flags & SDHCI_USE_ADMA)
870 sdhci_adma_table_post(host, data);
871 else {
872 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
873 data->sg_len, (data->flags & MMC_DATA_READ) ?
874 DMA_FROM_DEVICE : DMA_TO_DEVICE);
875 }
876 }
877
878 /*
879 * The specification states that the block count register must
880 * be updated, but it does not specify at what point in the
881 * data flow. That makes the register entirely useless to read
882 * back so we have to assume that nothing made it to the card
883 * in the event of an error.
d129bceb 884 */
885 if (data->error)
886 data->bytes_xfered = 0;
d129bceb 887 else
c9b74c5b 888 data->bytes_xfered = data->blksz * data->blocks;
d129bceb 889
890 if (data->stop) {
891 /*
892 * The controller needs a reset of internal state machines
893 * upon error conditions.
894 */
17b0429d 895 if (data->error) {
896 sdhci_reset(host, SDHCI_RESET_CMD);
897 sdhci_reset(host, SDHCI_RESET_DATA);
898 }
899
900 sdhci_send_command(host, data->stop);
901 } else
902 tasklet_schedule(&host->finish_tasklet);
903}
904
905static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
906{
907 int flags;
fd2208d7 908 u32 mask;
7cb2c76f 909 unsigned long timeout;
910
911 WARN_ON(host->cmd);
912
d129bceb 913 /* Wait max 10 ms */
7cb2c76f 914 timeout = 10;
915
916 mask = SDHCI_CMD_INHIBIT;
917 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
918 mask |= SDHCI_DATA_INHIBIT;
919
 920 /* We shouldn't wait for data inhibit for stop commands, even
921 though they might use busy signaling */
922 if (host->mrq->data && (cmd == host->mrq->data->stop))
923 mask &= ~SDHCI_DATA_INHIBIT;
924
4e4141a5 925 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
7cb2c76f 926 if (timeout == 0) {
d129bceb 927 printk(KERN_ERR "%s: Controller never released "
acf1da45 928 "inhibit bit(s).\n", mmc_hostname(host->mmc));
d129bceb 929 sdhci_dumpregs(host);
17b0429d 930 cmd->error = -EIO;
931 tasklet_schedule(&host->finish_tasklet);
932 return;
933 }
934 timeout--;
935 mdelay(1);
936 }
937
938 mod_timer(&host->timer, jiffies + 10 * HZ);
939
940 host->cmd = cmd;
941
a3c7778f 942 sdhci_prepare_data(host, cmd);
d129bceb 943
4e4141a5 944 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
d129bceb 945
946 sdhci_set_transfer_mode(host, cmd->data);
947
d129bceb 948 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
acf1da45 949 printk(KERN_ERR "%s: Unsupported response type!\n",
d129bceb 950 mmc_hostname(host->mmc));
17b0429d 951 cmd->error = -EINVAL;
952 tasklet_schedule(&host->finish_tasklet);
953 return;
954 }
955
956 if (!(cmd->flags & MMC_RSP_PRESENT))
957 flags = SDHCI_CMD_RESP_NONE;
958 else if (cmd->flags & MMC_RSP_136)
959 flags = SDHCI_CMD_RESP_LONG;
960 else if (cmd->flags & MMC_RSP_BUSY)
961 flags = SDHCI_CMD_RESP_SHORT_BUSY;
962 else
963 flags = SDHCI_CMD_RESP_SHORT;
964
965 if (cmd->flags & MMC_RSP_CRC)
966 flags |= SDHCI_CMD_CRC;
967 if (cmd->flags & MMC_RSP_OPCODE)
968 flags |= SDHCI_CMD_INDEX;
969 if (cmd->data)
970 flags |= SDHCI_CMD_DATA;
971
4e4141a5 972 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
973}
974
975static void sdhci_finish_command(struct sdhci_host *host)
976{
977 int i;
978
979 BUG_ON(host->cmd == NULL);
980
981 if (host->cmd->flags & MMC_RSP_PRESENT) {
982 if (host->cmd->flags & MMC_RSP_136) {
983 /* CRC is stripped so we need to do some shifting. */
984 for (i = 0;i < 4;i++) {
4e4141a5 985 host->cmd->resp[i] = sdhci_readl(host,
986 SDHCI_RESPONSE + (3-i)*4) << 8;
987 if (i != 3)
988 host->cmd->resp[i] |=
4e4141a5 989 sdhci_readb(host,
990 SDHCI_RESPONSE + (3-i)*4-1);
991 }
992 } else {
4e4141a5 993 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
994 }
995 }
996
17b0429d 997 host->cmd->error = 0;
d129bceb 998
999 if (host->data && host->data_early)
1000 sdhci_finish_data(host);
1001
1002 if (!host->cmd->data)
1003 tasklet_schedule(&host->finish_tasklet);
1004
1005 host->cmd = NULL;
1006}
1007
1008static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1009{
1010 int div;
1011 u16 clk;
7cb2c76f 1012 unsigned long timeout;
1013
1014 if (clock == host->clock)
1015 return;
1016
1017 if (host->ops->set_clock) {
1018 host->ops->set_clock(host, clock);
1019 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1020 return;
1021 }
1022
4e4141a5 1023 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1024
1025 if (clock == 0)
1026 goto out;
1027
1028 if (host->version >= SDHCI_SPEC_300) {
1029 /* Version 3.00 divisors must be a multiple of 2. */
1030 if (host->max_clk <= clock)
1031 div = 1;
1032 else {
0397526d 1033 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
1034 if ((host->max_clk / div) <= clock)
1035 break;
1036 }
1037 }
1038 } else {
1039 /* Version 2.00 divisors must be a power of 2. */
0397526d 1040 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1041 if ((host->max_clk / div) <= clock)
1042 break;
1043 }
1044 }
1045 div >>= 1;
1046
1047 clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1048 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1049 << SDHCI_DIVIDER_HI_SHIFT;
d129bceb 1050 clk |= SDHCI_CLOCK_INT_EN;
4e4141a5 1051 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
d129bceb 1052
1053 /* Wait max 20 ms */
1054 timeout = 20;
4e4141a5 1055 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1056 & SDHCI_CLOCK_INT_STABLE)) {
1057 if (timeout == 0) {
1058 printk(KERN_ERR "%s: Internal clock never "
1059 "stabilised.\n", mmc_hostname(host->mmc));
1060 sdhci_dumpregs(host);
1061 return;
1062 }
1063 timeout--;
1064 mdelay(1);
1065 }
1066
1067 clk |= SDHCI_CLOCK_CARD_EN;
4e4141a5 1068 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1069
1070out:
1071 host->clock = clock;
1072}
1073
1074static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1075{
8364248a 1076 u8 pwr = 0;
146ad66e 1077
8364248a 1078 if (power != (unsigned short)-1) {
1079 switch (1 << power) {
1080 case MMC_VDD_165_195:
1081 pwr = SDHCI_POWER_180;
1082 break;
1083 case MMC_VDD_29_30:
1084 case MMC_VDD_30_31:
1085 pwr = SDHCI_POWER_300;
1086 break;
1087 case MMC_VDD_32_33:
1088 case MMC_VDD_33_34:
1089 pwr = SDHCI_POWER_330;
1090 break;
1091 default:
1092 BUG();
1093 }
1094 }
1095
1096 if (host->pwr == pwr)
1097 return;
1098
1099 host->pwr = pwr;
1100
1101 if (pwr == 0) {
4e4141a5 1102 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
ae628903 1103 return;
1104 }
1105
1106 /*
1107 * Spec says that we should clear the power reg before setting
1108 * a new value. Some controllers don't seem to like this though.
1109 */
b8c86fc5 1110 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
4e4141a5 1111 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
146ad66e 1112
e08c1694 1113 /*
c71f6512 1114 * At least the Marvell CaFe chip gets confused if we set the voltage
 1115 * and turn on power at the same time, so set the voltage first.
1116 */
11a2f1b7 1117 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
ae628903 1118 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
e08c1694 1119
ae628903 1120 pwr |= SDHCI_POWER_ON;
146ad66e 1121
ae628903 1122 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1123
1124 /*
 1125 * Some controllers need an extra 10ms delay before they
 1126 * can apply clock after applying power
1127 */
11a2f1b7 1128 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
557b0697 1129 mdelay(10);
1130}
1131
1132/*****************************************************************************\
1133 * *
1134 * MMC callbacks *
1135 * *
1136\*****************************************************************************/
1137
1138static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1139{
1140 struct sdhci_host *host;
68d1fb7e 1141 bool present;
1142 unsigned long flags;
1143
1144 host = mmc_priv(mmc);
1145
1146 spin_lock_irqsave(&host->lock, flags);
1147
1148 WARN_ON(host->mrq != NULL);
1149
f9134319 1150#ifndef SDHCI_USE_LEDS_CLASS
d129bceb 1151 sdhci_activate_led(host);
2f730fec 1152#endif
1153 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
1154 if (mrq->stop) {
1155 mrq->data->stop = NULL;
1156 mrq->stop = NULL;
1157 }
1158 }
1159
1160 host->mrq = mrq;
1161
1162 /* If polling, assume that the card is always present. */
1163 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1164 present = true;
1165 else
1166 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1167 SDHCI_CARD_PRESENT;
1168
1169 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
17b0429d 1170 host->mrq->cmd->error = -ENOMEDIUM;
1171 tasklet_schedule(&host->finish_tasklet);
1172 } else
1173 sdhci_send_command(host, mrq->cmd);
1174
5f25a66f 1175 mmiowb();
d129bceb
PO
1176 spin_unlock_irqrestore(&host->lock, flags);
1177}
1178
1179static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1180{
1181 struct sdhci_host *host;
1182 unsigned long flags;
1183 u8 ctrl;
1184
1185 host = mmc_priv(mmc);
1186
1187 spin_lock_irqsave(&host->lock, flags);
1188
1189 if (host->flags & SDHCI_DEVICE_DEAD)
1190 goto out;
1191
1192 /*
1193 * Reset the chip on each power off.
1194 * Should clear out any weird states.
1195 */
1196 if (ios->power_mode == MMC_POWER_OFF) {
4e4141a5 1197 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
7260cf5e 1198 sdhci_reinit(host);
1199 }
1200
1201 sdhci_set_clock(host, ios->clock);
1202
1203 if (ios->power_mode == MMC_POWER_OFF)
146ad66e 1204 sdhci_set_power(host, -1);
d129bceb 1205 else
146ad66e 1206 sdhci_set_power(host, ios->vdd);
d129bceb 1207
1208 if (host->ops->platform_send_init_74_clocks)
1209 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1210
1211 /*
1212 * If your platform has 8-bit width support but is not a v3 controller,
1213 * or if it requires special setup code, you should implement that in
1214 * platform_8bit_width().
1215 */
1216 if (host->ops->platform_8bit_width)
1217 host->ops->platform_8bit_width(host, ios->bus_width);
1218 else {
1219 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1220 if (ios->bus_width == MMC_BUS_WIDTH_8) {
1221 ctrl &= ~SDHCI_CTRL_4BITBUS;
1222 if (host->version >= SDHCI_SPEC_300)
1223 ctrl |= SDHCI_CTRL_8BITBUS;
1224 } else {
1225 if (host->version >= SDHCI_SPEC_300)
1226 ctrl &= ~SDHCI_CTRL_8BITBUS;
1227 if (ios->bus_width == MMC_BUS_WIDTH_4)
1228 ctrl |= SDHCI_CTRL_4BITBUS;
1229 else
1230 ctrl &= ~SDHCI_CTRL_4BITBUS;
1231 }
1232 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1233 }
ae6d6c92 1234
15ec4461 1235 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
cd9277c0 1236
1237 if ((ios->timing == MMC_TIMING_SD_HS ||
1238 ios->timing == MMC_TIMING_MMC_HS)
1239 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1240 ctrl |= SDHCI_CTRL_HISPD;
1241 else
1242 ctrl &= ~SDHCI_CTRL_HISPD;
1243
4e4141a5 1244 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
d129bceb 1245
1246 /*
1247 * Some (ENE) controllers go apeshit on some ios operation,
1248 * signalling timeout and CRC errors even on CMD0. Resetting
1249 * it on each ios seems to solve the problem.
1250 */
b8c86fc5 1251 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1252 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1253
1e72859e 1254out:
5f25a66f 1255 mmiowb();
1256 spin_unlock_irqrestore(&host->lock, flags);
1257}
1258
82b0e23a 1259static int check_ro(struct sdhci_host *host)
d129bceb 1260{
d129bceb 1261 unsigned long flags;
2dfb579c 1262 int is_readonly;
d129bceb 1263
1264 spin_lock_irqsave(&host->lock, flags);
1265
1e72859e 1266 if (host->flags & SDHCI_DEVICE_DEAD)
1267 is_readonly = 0;
1268 else if (host->ops->get_ro)
1269 is_readonly = host->ops->get_ro(host);
1e72859e 1270 else
1271 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1272 & SDHCI_WRITE_PROTECT);
1273
1274 spin_unlock_irqrestore(&host->lock, flags);
1275
1276 /* This quirk needs to be replaced by a callback-function later */
1277 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1278 !is_readonly : is_readonly;
1279}
1280
1281#define SAMPLE_COUNT 5
1282
1283static int sdhci_get_ro(struct mmc_host *mmc)
1284{
1285 struct sdhci_host *host;
1286 int i, ro_count;
1287
1288 host = mmc_priv(mmc);
1289
1290 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1291 return check_ro(host);
1292
1293 ro_count = 0;
1294 for (i = 0; i < SAMPLE_COUNT; i++) {
1295 if (check_ro(host)) {
1296 if (++ro_count > SAMPLE_COUNT / 2)
1297 return 1;
1298 }
1299 msleep(30);
1300 }
1301 return 0;
1302}
1303
1304static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1305{
1306 struct sdhci_host *host;
1307 unsigned long flags;
1308
1309 host = mmc_priv(mmc);
1310
1311 spin_lock_irqsave(&host->lock, flags);
1312
1313 if (host->flags & SDHCI_DEVICE_DEAD)
1314 goto out;
1315
f75979b7 1316 if (enable)
1317 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1318 else
1319 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1e72859e 1320out:
1321 mmiowb();
1322
1323 spin_unlock_irqrestore(&host->lock, flags);
1324}
1325
ab7aefd0 1326static const struct mmc_host_ops sdhci_ops = {
1327 .request = sdhci_request,
1328 .set_ios = sdhci_set_ios,
1329 .get_ro = sdhci_get_ro,
f75979b7 1330 .enable_sdio_irq = sdhci_enable_sdio_irq,
1331};
1332
1333/*****************************************************************************\
1334 * *
1335 * Tasklets *
1336 * *
1337\*****************************************************************************/
1338
1339static void sdhci_tasklet_card(unsigned long param)
1340{
1341 struct sdhci_host *host;
1342 unsigned long flags;
1343
1344 host = (struct sdhci_host*)param;
1345
1346 spin_lock_irqsave(&host->lock, flags);
1347
4e4141a5 1348 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
1349 if (host->mrq) {
1350 printk(KERN_ERR "%s: Card removed during transfer!\n",
1351 mmc_hostname(host->mmc));
1352 printk(KERN_ERR "%s: Resetting controller.\n",
1353 mmc_hostname(host->mmc));
1354
1355 sdhci_reset(host, SDHCI_RESET_CMD);
1356 sdhci_reset(host, SDHCI_RESET_DATA);
1357
17b0429d 1358 host->mrq->cmd->error = -ENOMEDIUM;
1359 tasklet_schedule(&host->finish_tasklet);
1360 }
1361 }
1362
1363 spin_unlock_irqrestore(&host->lock, flags);
1364
04cf585d 1365 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1366}
1367
1368static void sdhci_tasklet_finish(unsigned long param)
1369{
1370 struct sdhci_host *host;
1371 unsigned long flags;
1372 struct mmc_request *mrq;
1373
1374 host = (struct sdhci_host*)param;
1375
1376 /*
1377 * If this tasklet gets rescheduled while running, it will
1378 * be run again afterwards but without any active request.
1379 */
1380 if (!host->mrq)
1381 return;
1382
1383 spin_lock_irqsave(&host->lock, flags);
1384
1385 del_timer(&host->timer);
1386
1387 mrq = host->mrq;
1388
1389 /*
1390 * The controller needs a reset of internal state machines
1391 * upon error conditions.
1392 */
1e72859e 1393 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
b7b4d342 1394 ((mrq->cmd && mrq->cmd->error) ||
1395 (mrq->data && (mrq->data->error ||
1396 (mrq->data->stop && mrq->data->stop->error))) ||
1397 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1398
1399 /* Some controllers need this kick or reset won't work here */
b8c86fc5 1400 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1401 unsigned int clock;
1402
1403 /* This is to force an update */
1404 clock = host->clock;
1405 host->clock = 0;
1406 sdhci_set_clock(host, clock);
1407 }
1408
1409 /* Spec says we should do both at the same time, but Ricoh
1410 controllers do not like that. */
1411 sdhci_reset(host, SDHCI_RESET_CMD);
1412 sdhci_reset(host, SDHCI_RESET_DATA);
1413 }
1414
1415 host->mrq = NULL;
1416 host->cmd = NULL;
1417 host->data = NULL;
1418
f9134319 1419#ifndef SDHCI_USE_LEDS_CLASS
d129bceb 1420 sdhci_deactivate_led(host);
2f730fec 1421#endif
d129bceb 1422
5f25a66f 1423 mmiowb();
1424 spin_unlock_irqrestore(&host->lock, flags);
1425
1426 mmc_request_done(host->mmc, mrq);
1427}
1428
1429static void sdhci_timeout_timer(unsigned long data)
1430{
1431 struct sdhci_host *host;
1432 unsigned long flags;
1433
1434 host = (struct sdhci_host*)data;
1435
1436 spin_lock_irqsave(&host->lock, flags);
1437
1438 if (host->mrq) {
1439 printk(KERN_ERR "%s: Timeout waiting for hardware "
1440 "interrupt.\n", mmc_hostname(host->mmc));
1441 sdhci_dumpregs(host);
1442
1443 if (host->data) {
17b0429d 1444 host->data->error = -ETIMEDOUT;
1445 sdhci_finish_data(host);
1446 } else {
1447 if (host->cmd)
17b0429d 1448 host->cmd->error = -ETIMEDOUT;
d129bceb 1449 else
17b0429d 1450 host->mrq->cmd->error = -ETIMEDOUT;
1451
1452 tasklet_schedule(&host->finish_tasklet);
1453 }
1454 }
1455
5f25a66f 1456 mmiowb();
1457 spin_unlock_irqrestore(&host->lock, flags);
1458}
1459
1460/*****************************************************************************\
1461 * *
1462 * Interrupt handling *
1463 * *
1464\*****************************************************************************/
1465
1466static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1467{
1468 BUG_ON(intmask == 0);
1469
1470 if (!host->cmd) {
1471 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
1472 "though no command operation was in progress.\n",
1473 mmc_hostname(host->mmc), (unsigned)intmask);
1474 sdhci_dumpregs(host);
1475 return;
1476 }
1477
43b58b36 1478 if (intmask & SDHCI_INT_TIMEOUT)
1479 host->cmd->error = -ETIMEDOUT;
1480 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1481 SDHCI_INT_INDEX))
1482 host->cmd->error = -EILSEQ;
43b58b36 1483
e809517f 1484 if (host->cmd->error) {
d129bceb 1485 tasklet_schedule(&host->finish_tasklet);
1486 return;
1487 }
1488
1489 /*
 1490 * The host can send an interrupt when the busy state has
1491 * ended, allowing us to wait without wasting CPU cycles.
1492 * Unfortunately this is overloaded on the "data complete"
1493 * interrupt, so we need to take some care when handling
1494 * it.
1495 *
1496 * Note: The 1.0 specification is a bit ambiguous about this
1497 * feature so there might be some problems with older
1498 * controllers.
1499 */
1500 if (host->cmd->flags & MMC_RSP_BUSY) {
1501 if (host->cmd->data)
1502 DBG("Cannot wait for busy signal when also "
1503 "doing a data transfer");
f945405c 1504 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
e809517f 1505 return;
1506
1507 /* The controller does not support the end-of-busy IRQ,
1508 * fall through and take the SDHCI_INT_RESPONSE */
1509 }
1510
1511 if (intmask & SDHCI_INT_RESPONSE)
43b58b36 1512 sdhci_finish_command(host);
1513}
1514
0957c333 1515#ifdef CONFIG_MMC_DEBUG
1516static void sdhci_show_adma_error(struct sdhci_host *host)
1517{
1518 const char *name = mmc_hostname(host->mmc);
1519 u8 *desc = host->adma_desc;
1520 __le32 *dma;
1521 __le16 *len;
1522 u8 attr;
1523
1524 sdhci_dumpregs(host);
1525
1526 while (true) {
1527 dma = (__le32 *)(desc + 4);
1528 len = (__le16 *)(desc + 2);
1529 attr = *desc;
1530
1531 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1532 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1533
1534 desc += 8;
1535
1536 if (attr & 2)
1537 break;
1538 }
1539}
1540#else
1541static void sdhci_show_adma_error(struct sdhci_host *host) { }
1542#endif
1543
1544static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1545{
1546 BUG_ON(intmask == 0);
1547
1548 if (!host->data) {
1549 /*
1550 * The "data complete" interrupt is also used to
1551 * indicate that a busy state has ended. See comment
1552 * above in sdhci_cmd_irq().
d129bceb 1553 */
1554 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1555 if (intmask & SDHCI_INT_DATA_END) {
1556 sdhci_finish_command(host);
1557 return;
1558 }
1559 }
d129bceb 1560
1561 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1562 "though no data operation was in progress.\n",
1563 mmc_hostname(host->mmc), (unsigned)intmask);
1564 sdhci_dumpregs(host);
1565
1566 return;
1567 }
1568
1569 if (intmask & SDHCI_INT_DATA_TIMEOUT)
17b0429d 1570 host->data->error = -ETIMEDOUT;
1571 else if (intmask & SDHCI_INT_DATA_END_BIT)
1572 host->data->error = -EILSEQ;
1573 else if ((intmask & SDHCI_INT_DATA_CRC) &&
1574 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
1575 != MMC_BUS_TEST_R)
17b0429d 1576 host->data->error = -EILSEQ;
1577 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1578 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1579 sdhci_show_adma_error(host);
2134a922 1580 host->data->error = -EIO;
6882a8c0 1581 }
d129bceb 1582
17b0429d 1583 if (host->data->error)
1584 sdhci_finish_data(host);
1585 else {
a406f5a3 1586 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
1587 sdhci_transfer_pio(host);
1588
1589 /*
1590 * We currently don't do anything fancy with DMA
1591 * boundaries, but as we can't disable the feature
1592 * we need to at least restart the transfer.
1593 *
1594 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
1595 * should return a valid address to continue from, but as
1596 * some controllers are faulty, don't trust them.
6ba736a1 1597 */
1598 if (intmask & SDHCI_INT_DMA_END) {
1599 u32 dmastart, dmanow;
1600 dmastart = sg_dma_address(host->data->sg);
1601 dmanow = dmastart + host->data->bytes_xfered;
1602 /*
1603 * Force update to the next DMA block boundary.
1604 */
1605 dmanow = (dmanow &
1606 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
1607 SDHCI_DEFAULT_BOUNDARY_SIZE;
1608 host->data->bytes_xfered = dmanow - dmastart;
1609 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
1610 " next 0x%08x\n",
1611 mmc_hostname(host->mmc), dmastart,
1612 host->data->bytes_xfered, dmanow);
1613 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
1614 }
6ba736a1 1615
1616 if (intmask & SDHCI_INT_DATA_END) {
1617 if (host->cmd) {
1618 /*
1619 * Data managed to finish before the
1620 * command completed. Make sure we do
1621 * things in the proper order.
1622 */
1623 host->data_early = 1;
1624 } else {
1625 sdhci_finish_data(host);
1626 }
1627 }
1628 }
1629}
1630
7d12e780 1631static irqreturn_t sdhci_irq(int irq, void *dev_id)
1632{
1633 irqreturn_t result;
1634 struct sdhci_host* host = dev_id;
1635 u32 intmask;
f75979b7 1636 int cardint = 0;
1637
1638 spin_lock(&host->lock);
1639
4e4141a5 1640 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
d129bceb 1641
62df67a5 1642 if (!intmask || intmask == 0xffffffff) {
1643 result = IRQ_NONE;
1644 goto out;
1645 }
1646
1647 DBG("*** %s got interrupt: 0x%08x\n",
1648 mmc_hostname(host->mmc), intmask);
d129bceb 1649
3192a28f 1650 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
1651 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
1652 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
d129bceb 1653 tasklet_schedule(&host->card_tasklet);
3192a28f 1654 }
d129bceb 1655
3192a28f 1656 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
d129bceb 1657
3192a28f 1658 if (intmask & SDHCI_INT_CMD_MASK) {
1659 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
1660 SDHCI_INT_STATUS);
3192a28f 1661 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1662 }
1663
1664 if (intmask & SDHCI_INT_DATA_MASK) {
1665 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
1666 SDHCI_INT_STATUS);
3192a28f 1667 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1668 }
1669
1670 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1671
1672 intmask &= ~SDHCI_INT_ERROR;
1673
d129bceb 1674 if (intmask & SDHCI_INT_BUS_POWER) {
3192a28f 1675 printk(KERN_ERR "%s: Card is consuming too much power!\n",
d129bceb 1676 mmc_hostname(host->mmc));
4e4141a5 1677 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
1678 }
1679
9d26a5d3 1680 intmask &= ~SDHCI_INT_BUS_POWER;
3192a28f 1681
1682 if (intmask & SDHCI_INT_CARD_INT)
1683 cardint = 1;
1684
1685 intmask &= ~SDHCI_INT_CARD_INT;
1686
3192a28f 1687 if (intmask) {
acf1da45 1688 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
3192a28f 1689 mmc_hostname(host->mmc), intmask);
1690 sdhci_dumpregs(host);
1691
4e4141a5 1692 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3192a28f 1693 }
1694
1695 result = IRQ_HANDLED;
1696
5f25a66f 1697 mmiowb();
1698out:
1699 spin_unlock(&host->lock);
1700
1701 /*
1702 * We have to delay this as it calls back into the driver.
1703 */
1704 if (cardint)
1705 mmc_signal_sdio_irq(host->mmc);
1706
1707 return result;
1708}
1709
1710/*****************************************************************************\
1711 * *
1712 * Suspend/resume *
1713 * *
1714\*****************************************************************************/
1715
1716#ifdef CONFIG_PM
1717
b8c86fc5 1718int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
d129bceb 1719{
b8c86fc5 1720 int ret;
a715dfc7 1721
1722 sdhci_disable_card_detection(host);
1723
1a13f8fa 1724 ret = mmc_suspend_host(host->mmc);
1725 if (ret)
1726 return ret;
a715dfc7 1727
b8c86fc5 1728 free_irq(host->irq, host);
d129bceb 1729
1730 if (host->vmmc)
1731 ret = regulator_disable(host->vmmc);
1732
1733 return ret;
1734}
1735
b8c86fc5 1736EXPORT_SYMBOL_GPL(sdhci_suspend_host);
d129bceb 1737
1738int sdhci_resume_host(struct sdhci_host *host)
1739{
1740 int ret;
d129bceb 1741
1742 if (host->vmmc) {
1743 int ret = regulator_enable(host->vmmc);
1744 if (ret)
1745 return ret;
1746 }
1747
1748
a13abc7b 1749 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1750 if (host->ops->enable_dma)
1751 host->ops->enable_dma(host);
1752 }
d129bceb 1753
1754 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1755 mmc_hostname(host->mmc), host);
1756 if (ret)
1757 return ret;
d129bceb 1758
2f4cbb3d 1759 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
1760 mmiowb();
1761
1762 ret = mmc_resume_host(host->mmc);
1763 sdhci_enable_card_detection(host);
1764
2f4cbb3d 1765 return ret;
1766}
1767
b8c86fc5 1768EXPORT_SYMBOL_GPL(sdhci_resume_host);
d129bceb 1769
1770void sdhci_enable_irq_wakeups(struct sdhci_host *host)
1771{
1772 u8 val;
1773 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
1774 val |= SDHCI_WAKE_ON_INT;
1775 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
1776}
1777
1778EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
1779
1780#endif /* CONFIG_PM */
1781
1782/*****************************************************************************\
1783 * *
b8c86fc5 1784 * Device allocation/registration *
1785 * *
1786\*****************************************************************************/
1787
1788struct sdhci_host *sdhci_alloc_host(struct device *dev,
1789 size_t priv_size)
d129bceb 1790{
1791 struct mmc_host *mmc;
1792 struct sdhci_host *host;
1793
b8c86fc5 1794 WARN_ON(dev == NULL);
d129bceb 1795
b8c86fc5 1796 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
d129bceb 1797 if (!mmc)
b8c86fc5 1798 return ERR_PTR(-ENOMEM);
1799
1800 host = mmc_priv(mmc);
1801 host->mmc = mmc;
1802
1803 return host;
1804}
8a4da143 1805
b8c86fc5 1806EXPORT_SYMBOL_GPL(sdhci_alloc_host);
d129bceb 1807
1808int sdhci_add_host(struct sdhci_host *host)
1809{
1810 struct mmc_host *mmc;
8f230f45 1811 unsigned int caps, ocr_avail;
b8c86fc5 1812 int ret;
d129bceb 1813
1814 WARN_ON(host == NULL);
1815 if (host == NULL)
1816 return -EINVAL;
d129bceb 1817
b8c86fc5 1818 mmc = host->mmc;
d129bceb 1819
1820 if (debug_quirks)
1821 host->quirks = debug_quirks;
d129bceb 1822
1823 sdhci_reset(host, SDHCI_RESET_ALL);
1824
4e4141a5 1825 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
1826 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1827 >> SDHCI_SPEC_VER_SHIFT;
85105c53 1828 if (host->version > SDHCI_SPEC_300) {
4a965505 1829 printk(KERN_ERR "%s: Unknown controller version (%d). "
b69c9058 1830 "You may experience problems.\n", mmc_hostname(mmc),
2134a922 1831 host->version);
1832 }
1833
1834 caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
1835 sdhci_readl(host, SDHCI_CAPABILITIES);
d129bceb 1836
b8c86fc5 1837 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1838 host->flags |= SDHCI_USE_SDMA;
1839 else if (!(caps & SDHCI_CAN_DO_SDMA))
1840 DBG("Controller doesn't have SDMA capability\n");
67435274 1841 else
a13abc7b 1842 host->flags |= SDHCI_USE_SDMA;
d129bceb 1843
b8c86fc5 1844 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
a13abc7b 1845 (host->flags & SDHCI_USE_SDMA)) {
cee687ce 1846 DBG("Disabling DMA as it is marked broken\n");
a13abc7b 1847 host->flags &= ~SDHCI_USE_SDMA;
1848 }
1849
1850 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
1851 host->flags |= SDHCI_USE_ADMA;
1852
1853 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1854 (host->flags & SDHCI_USE_ADMA)) {
1855 DBG("Disabling ADMA as it is marked broken\n");
1856 host->flags &= ~SDHCI_USE_ADMA;
1857 }
1858
a13abc7b 1859 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1860 if (host->ops->enable_dma) {
1861 if (host->ops->enable_dma(host)) {
1862 printk(KERN_WARNING "%s: No suitable DMA "
1863 "available. Falling back to PIO.\n",
1864 mmc_hostname(mmc));
1865 host->flags &=
1866 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
b8c86fc5 1867 }
1868 }
1869 }
1870
1871 if (host->flags & SDHCI_USE_ADMA) {
1872 /*
1873 * We need to allocate descriptors for all sg entries
1874 * (128) and potentially one alignment transfer for
1875 * each of those entries.
1876 */
1877 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1878 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1879 if (!host->adma_desc || !host->align_buffer) {
1880 kfree(host->adma_desc);
1881 kfree(host->align_buffer);
1882 printk(KERN_WARNING "%s: Unable to allocate ADMA "
1883 "buffers. Falling back to standard DMA.\n",
1884 mmc_hostname(mmc));
1885 host->flags &= ~SDHCI_USE_ADMA;
1886 }
1887 }
1888
7659150c
PO
1889 /*
1890 * If we use DMA, then it's up to the caller to set the DMA
1891 * mask, but PIO does not need the hw shim so we set a new
1892 * mask here in that case.
1893 */
a13abc7b 1894 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
7659150c
PO
1895 host->dma_mask = DMA_BIT_MASK(64);
1896 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1897 }
d129bceb 1898
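	/*
	 * The base clock is reported in MHz in the capability register
	 * (spec v3.00 widens the field), hence the *1000000 below. A zero
	 * value, or SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, means the platform
	 * driver must supply ops->get_max_clock().
	 */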
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

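	/*
	 * The data timeout clock is reported in kHz, or in MHz when
	 * SDHCI_TIMEOUT_CLK_UNIT is set, hence the *1000 below.
	 */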
	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (host->ops->get_timeout_clock) {
			host->timeout_clk = host->ops->get_timeout_clock(host);
		} else if (!(host->quirks &
				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
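	/*
	 * The minimum card clock follows from the largest divider the
	 * controller supports: a 10-bit divisor on v3.00 hosts versus an
	 * 8-bit power-of-two divisor on earlier ones, unless the platform
	 * overrides it via ops->get_min_clock().
	 */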
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300)
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	mmc->f_max = host->max_clk;
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE;

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

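	/*
	 * Hosts with an unusable card-detect line must be polled for card
	 * insertion/removal, but only if the slot is actually removable.
	 */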
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

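	/*
	 * Translate the controller's supported voltage ranges into OCR
	 * bits.  Platform code may further restrict the SDIO/SD/MMC masks
	 * via the host->ocr_avail_* fields.
	 */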
	ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		ocr_avail |= MMC_VDD_165_195;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"supported voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = 128;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			printk(KERN_WARNING "%s: Invalid maximum block size, "
				"assuming 512 bytes\n", mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

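	/* The controller interrupt may be shared with other devices. */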
	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

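	/* An external vmmc supply is optional; carry on without one. */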
	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else {
		regulator_enable(host->vmmc);
	}

	sdhci_init(host, 0);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);

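/*
 * Tear down a host. 'dead' means the controller has already vanished
 * (e.g. surprise removal): fail any request in flight and skip the final
 * register reset, since touching the hardware is no longer safe.
 */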
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				"transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

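/*
 * Release the mmc_host allocated by sdhci_alloc_host(), and the embedded
 * sdhci_host along with it.
 */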
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");