/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
 * points to a list of s-g entries and we ignore srb->request_bufflen.
 * For non-scatter-gather transfers, srb->request_buffer points to the
 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off. */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *) scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE-1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {

				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {

				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
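
/*
 * Usage sketch (illustrative only, not part of the driver): copying a
 * response out to the srb in two chunks.  Because *index and *offset
 * persist across calls, the second call resumes exactly where the first
 * one stopped.  The buffer name and sizes below are made up for the
 * example.
 *
 *	unsigned int index = 0, offset = 0;
 *	unsigned char resp[64];
 *
 *	rtsx_stor_access_xfer_buf(resp, 32, srb, &index, &offset,
 *				  TO_XFER_BUF);
 *	rtsx_stor_access_xfer_buf(resp + 32, 32, srb, &index, &offset,
 *				  TO_XFER_BUF);
 */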

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
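
/*
 * Example (illustrative): a SCSI command handler typically builds a
 * short response in a local buffer and hands it over with
 * rtsx_stor_set_xfer_buf(), which also sets the residue when the
 * response is shorter than the host buffer.  The buffer contents here
 * are made up.
 *
 *	unsigned char inquiry[36] = {0};
 *
 *	inquiry[0] = 0x00;	(peripheral device type: direct access)
 *	rtsx_stor_set_xfer_buf(inquiry, sizeof(inquiry), srb);
 */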


/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: the intent was to resynchronize with
	 * the device by issuing a port reset, falling back to a
	 * class-specific device reset.  Neither is implemented here; the
	 * error is already recorded in srb->result, so we simply return
	 * and leave any recovery to the SCSI midlayer. */
Handle_Errors:
	return;
}
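
/*
 * Sketch of the failure path above (illustrative): a protocol handler
 * that detects a command failure records per-LUN sense data and returns
 * TRANSPORT_FAILED; rtsx_invoke_transport() then copies that sense data
 * into srb->sense_buffer and reports CHECK CONDITION.  set_sense_type()
 * and the sense code are assumed to be the driver helpers that fill
 * chip->sense_buffer[lun]:
 *
 *	set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 *	return TRANSPORT_FAILED;
 */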

void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	/* __le32 keeps the cpu_to_le32() store below type-correct on
	 * big-endian machines as well */
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);
	spin_unlock_irq(&chip->rtsx->reg_lock);
}
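
/*
 * Layout of the 32-bit command word built above (read off the packing
 * code):
 *
 *	bits 31:30  cmd_type (register read/write/check)
 *	bits 29:16  reg_addr (14 bits)
 *	bits 15:8   mask
 *	bits 7:0    data
 *
 * Example (illustrative; WRITE_REG_CMD and the register/bit names are
 * assumed to come from elsewhere in this driver):
 *
 *	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_STOP,
 *		     SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR);
 */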

void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	/* unsigned constant: shifting a signed 1 into bit 31 is
	 * undefined behaviour in C */
	u32 val = 1U << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}
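
/*
 * The value written to RTSX_HCBCTLR above decodes as (from the code):
 *
 *	bit 31      start the command-buffer DMA
 *	bit 30      hardware auto response
 *	bits 23:0   command buffer length in bytes (chip->ci * 4)
 */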

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1U << 31;	/* unsigned: avoid signed-shift UB */
	long timeleft;
	int err = 0;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
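
/*
 * Typical usage sketch (illustrative): queue a batch of register
 * commands, fire it with a 100 ms timeout, then read the responses
 * back.  rtsx_init_cmd() and rtsx_get_cmd_data() are assumed to be the
 * rtsx_chip.h helpers that reset chip->ci and return the response
 * buffer; 0xFD30 is a made-up register address.
 *
 *	int retval;
 *	u8 *ptr;
 *
 *	rtsx_init_cmd(chip);
 *	rtsx_add_cmd(chip, READ_REG_CMD, 0xFD30, 0, 0);
 *	retval = rtsx_send_cmd(chip, SD_CARD, 100);
 *	if (retval < 0)
 *		return retval;
 *	ptr = rtsx_get_cmd_data(chip);
 */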

static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	/* __le64 keeps the cpu_to_le64() store type-correct */
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}
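
/*
 * Layout of the 64-bit sg descriptor built above (read off the packing
 * code):
 *
 *	bits 63:32  DMA address
 *	bits 31:12  transfer length (segments longer than 0x80000 bytes
 *	            are split)
 *	bits 7:0    option flags (SG_VALID, SG_END, SG_TRANS_DATA)
 *
 * For example (illustrative numbers), a 0x100000-byte segment at
 * address 0x10000000 is emitted as two descriptors, with SG_END kept
 * only on the last one:
 *
 *	addr 0x10000000, len 0x80000, SG_VALID | SG_TRANS_DATA
 *	addr 0x10080000, len 0x80000, SG_VALID | SG_TRANS_DATA | SG_END
 */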

static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be sg + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			     (unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		else
			option = SG_VALID | SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
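
/*
 * Resume semantics (illustrative numbers): with a 3-entry sg list of
 * 0x10000 bytes each, a first call with size = 0x18000 consumes entry 0
 * fully and half of entry 1, returning with *index == 1 and
 * *offset == 0x8000.  A second call then picks up at that exact point
 * without re-walking the list from the start.
 */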

static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				     (unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			else
				option = SG_VALID | SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* sg_ptr already points at the next entry here: the inner
		 * loop advanced it with sg_next() once per descriptor.  The
		 * old "sg_ptr += sg_cnt;" advanced it a second time, and
		 * its plain pointer arithmetic also breaks on chained
		 * scatterlists, so it is dropped. */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
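
/*
 * Chunking note: the sg table buffer holds HOST_SG_TBL_BUF_LEN / 8
 * descriptors, so a mapped list of buf_cnt entries is programmed in
 * batches of that size, with one DMA trigger and one completion wait
 * per batch.  With room for 256 descriptors, for example, a 600-entry
 * list goes out as 256 + 256 + 88.  (When buf_cnt is an exact multiple
 * of the capacity, the loop's final pass programs an empty table; that
 * quirk is inherited from the original code.)
 */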

static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
		size_t len, enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = 1U << 31;	/* unsigned: avoid signed-shift UB */
	long timeleft;

	if ((buf == NULL) || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
	/* a DMA address of 0 can be valid; check with dma_mapping_error()
	 * rather than testing the handle against zero */
	if (dma_mapping_error(&(rtsx->pci->dev), addr))
		return -ENOMEM;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
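
/*
 * Usage sketch (illustrative): moving a small bounce buffer to the card
 * with a 500 ms timeout.  The buffer must be DMA-able (e.g. kmalloc'ed,
 * never on the stack), since it is handed to dma_map_single() above.
 *
 *	int retval;
 *	u8 *buf = kmalloc(512, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... fill buf ...
 *	retval = rtsx_transfer_buf(chip, SD_CARD, buf, 512,
 *				   DMA_TO_DEVICE, 500);
 *	kfree(buf);
 */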

int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
	void *buf, size_t len, int use_sg, unsigned int *index,
	unsigned int *offset, enum dma_data_direction dma_dir,
	int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}
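
/*
 * Usage sketch (illustrative): streaming a large request in card-sized
 * chunks.  index/offset are kept across calls, so each call continues
 * in the middle of the srb's scatterlist; the 0x20000 chunk size and
 * 1000 ms timeout are made up for the example.
 *
 *	unsigned int index = 0, offset = 0;
 *	int err;
 *
 *	err = rtsx_transfer_data_partial(chip, SD_CARD,
 *			(void *)scsi_sglist(srb), 0x20000,
 *			scsi_sg_count(srb), &index, &offset,
 *			DMA_FROM_DEVICE, 1000);
 *	... repeat for the next chunk; index/offset carry the position ...
 */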

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}
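
/*
 * Usage sketch (illustrative): a whole-request transfer straight from
 * the srb's scatterlist, as the SD/MS/xD card code would issue it; the
 * 10-second timeout is a made-up value.
 *
 *	err = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
 *				 scsi_bufflen(srb), scsi_sg_count(srb),
 *				 DMA_FROM_DEVICE, 10000);
 */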