mmc: core: Add helper function for EXT_CSD support
drivers/mmc/core/mmc_ops.c

/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}

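/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * a typical caller polls CMD13 via mmc_send_status() until the card leaves
 * the programming state, e.g. after a write has been issued.
 */
static int example_poll_for_prg_done(struct mmc_card *card)
{
	u32 status;
	int err;

	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	return 0;
}
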
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * the drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

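/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * card init code is expected to program the DSR only when the card
 * advertises it in the CSD (dsr_imp, bit 76) and the host actually
 * requested a value (dsr_req, typically filled from the device tree).
 * Field names reflect the mainline structs of this era.
 */
static void example_maybe_set_dsr(struct mmc_card *card)
{
	if (card->csd.dsr_imp && card->host->dsr_req)
		mmc_set_dsr(card->host);
}
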
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

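/*
 * Illustrative, simplified sketch (hypothetical helper, not part of the
 * original file): bus probing typically issues CMD1 once with a zero OCR to
 * query the card's voltage window, then re-issues it with the chosen window
 * (plus bit 30 to request sector addressing) to start initialization.
 * Voltage selection is omitted here for brevity.
 */
static int example_probe_op_cond(struct mmc_host *host, u32 *rocr)
{
	u32 ocr;
	int err;

	err = mmc_send_op_cond(host, 0, &ocr);	/* probe only */
	if (err)
		return err;

	return mmc_send_op_cond(host, ocr | (1 << 30), rocr);
}
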
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass either a DMA-capable buffer or an on-stack
 * buffer in @buf; on-stack buffers are bounced through a temporary
 * allocation here (with some overhead in this callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		  u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
					   csd, MMC_SEND_CSD);

	csd_tmp = kmalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
					   cid, MMC_SEND_CID);
	}

	cid_tmp = kmalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
				 ext_csd, 512);
}
EXPORT_SYMBOL_GPL(mmc_send_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	a timeout of zero implies the maximum possible timeout
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * If we are not allowed to issue a status command and the
		 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
			    false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

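/*
 * Illustrative example (hypothetical helper, not part of the original file):
 * a typical mmc_switch() user programs a single EXT_CSD byte, e.g. the bus
 * width, using the card's generic CMD6 timeout from EXT_CSD.
 */
static int example_set_4bit_bus_width(struct mmc_card *card)
{
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			  EXT_CSD_BUS_WIDTH_4,
			  card->ext_csd.generic_cmd6_time);
}
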
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}
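
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * bus-width selection code typically switches the card and host controller
 * to a candidate width and then runs mmc_bus_test() to verify the data
 * lines, falling back to a narrower width on failure.
 */
static int example_verify_4bit_bus(struct mmc_card *card)
{
	int err;

	/* assumes the card's EXT_CSD_BUS_WIDTH has already been switched */
	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);

	err = mmc_bus_test(card, MMC_BUS_WIDTH_4);
	if (err)
		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);

	return err;
}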

int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
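
/*
 * Illustrative example (hypothetical helper, not part of the original file):
 * callers are expected to gate EXT_CSD reads on mmc_can_ext_csd(), since
 * cards below CSD spec version 4 have no EXT_CSD register. The buffer must
 * be at least 512 bytes.
 */
static int example_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	return mmc_send_ext_csd(card, ext_csd);
}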