2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 /* ****************** SDIO CARD Interface Functions **************************/
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/export.h>
21 #include <linux/pci.h>
22 #include <linux/pci_ids.h>
23 #include <linux/sched.h>
24 #include <linux/completion.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mmc/sdio.h>
27 #include <linux/mmc/sdio_func.h>
28 #include <linux/mmc/card.h>
29 #include <linux/platform_data/brcmfmac-sdio.h>
32 #include <brcm_hw_ids.h>
33 #include <brcmu_utils.h>
34 #include <brcmu_wifi.h>
38 #include "sdio_host.h"
40 #define SDIOH_API_ACCESS_RETRY_LIMIT 2
43 static irqreturn_t
brcmf_sdio_oob_irqhandler(int irq
, void *dev_id
)
45 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev_id
);
46 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
48 brcmf_dbg(INTR
, "OOB intr triggered\n");
50 /* out-of-band interrupt is level-triggered which won't
51 * be cleared until dpc
53 if (sdiodev
->irq_en
) {
54 disable_irq_nosync(irq
);
55 sdiodev
->irq_en
= false;
58 brcmf_sdbrcm_isr(sdiodev
->bus
);
63 static void brcmf_sdio_ib_irqhandler(struct sdio_func
*func
)
65 struct brcmf_bus
*bus_if
= dev_get_drvdata(&func
->dev
);
66 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
68 brcmf_dbg(INTR
, "IB intr triggered\n");
70 brcmf_sdbrcm_isr(sdiodev
->bus
);
/* dummy handler for SDIO function 2 interrupt */
/* Claiming the F2 IRQ with a no-op handler keeps the MMC core's interrupt
 * dispatch enabled for both functions; real work happens in the F1 handler.
 */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}
78 int brcmf_sdio_intr_register(struct brcmf_sdio_dev
*sdiodev
)
84 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
85 brcmf_dbg(SDIO
, "Enter, register OOB IRQ %d\n",
86 sdiodev
->pdata
->oob_irq_nr
);
87 ret
= request_irq(sdiodev
->pdata
->oob_irq_nr
,
88 brcmf_sdio_oob_irqhandler
,
89 sdiodev
->pdata
->oob_irq_flags
,
91 &sdiodev
->func
[1]->dev
);
93 brcmf_err("request_irq failed %d\n", ret
);
96 sdiodev
->oob_irq_requested
= true;
97 spin_lock_init(&sdiodev
->irq_en_lock
);
98 spin_lock_irqsave(&sdiodev
->irq_en_lock
, flags
);
99 sdiodev
->irq_en
= true;
100 spin_unlock_irqrestore(&sdiodev
->irq_en_lock
, flags
);
102 ret
= enable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
104 brcmf_err("enable_irq_wake failed %d\n", ret
);
107 sdiodev
->irq_wake
= true;
109 sdio_claim_host(sdiodev
->func
[1]);
111 /* must configure SDIO_CCCR_IENx to enable irq */
112 data
= brcmf_sdio_regrb(sdiodev
, SDIO_CCCR_IENx
, &ret
);
113 data
|= 1 << SDIO_FUNC_1
| 1 << SDIO_FUNC_2
| 1;
114 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, data
, &ret
);
116 /* redirect, configure and enable io for interrupt signal */
117 data
= SDIO_SEPINT_MASK
| SDIO_SEPINT_OE
;
118 if (sdiodev
->pdata
->oob_irq_flags
& IRQF_TRIGGER_HIGH
)
119 data
|= SDIO_SEPINT_ACT_HI
;
120 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, data
, &ret
);
122 sdio_release_host(sdiodev
->func
[1]);
124 brcmf_dbg(SDIO
, "Entering\n");
125 sdio_claim_host(sdiodev
->func
[1]);
126 sdio_claim_irq(sdiodev
->func
[1], brcmf_sdio_ib_irqhandler
);
127 sdio_claim_irq(sdiodev
->func
[2], brcmf_sdio_dummy_irqhandler
);
128 sdio_release_host(sdiodev
->func
[1]);
134 int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev
*sdiodev
)
136 brcmf_dbg(SDIO
, "Entering\n");
138 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
139 sdio_claim_host(sdiodev
->func
[1]);
140 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, 0, NULL
);
141 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, 0, NULL
);
142 sdio_release_host(sdiodev
->func
[1]);
144 if (sdiodev
->oob_irq_requested
) {
145 sdiodev
->oob_irq_requested
= false;
146 if (sdiodev
->irq_wake
) {
147 disable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
148 sdiodev
->irq_wake
= false;
150 free_irq(sdiodev
->pdata
->oob_irq_nr
,
151 &sdiodev
->func
[1]->dev
);
152 sdiodev
->irq_en
= false;
155 sdio_claim_host(sdiodev
->func
[1]);
156 sdio_release_irq(sdiodev
->func
[2]);
157 sdio_release_irq(sdiodev
->func
[1]);
158 sdio_release_host(sdiodev
->func
[1]);
165 brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev
*sdiodev
, u32 address
)
171 addr
[0] = (address
>> 8) & SBSDIO_SBADDRLOW_MASK
;
172 addr
[1] = (address
>> 16) & SBSDIO_SBADDRMID_MASK
;
173 addr
[2] = (address
>> 24) & SBSDIO_SBADDRHIGH_MASK
;
175 for (i
= 0; i
< 3; i
++) {
179 usleep_range(1000, 2000);
180 err
= brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
,
181 SDIO_FUNC_1
, SBSDIO_FUNC1_SBADDRLOW
+ i
,
183 } while (err
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
186 brcmf_err("failed at addr:0x%0x\n",
187 SBSDIO_FUNC1_SBADDRLOW
+ i
);
196 brcmf_sdio_addrprep(struct brcmf_sdio_dev
*sdiodev
, uint width
, u32
*addr
)
198 uint bar0
= *addr
& ~SBSDIO_SB_OFT_ADDR_MASK
;
201 if (bar0
!= sdiodev
->sbwad
) {
202 err
= brcmf_sdcard_set_sbaddr_window(sdiodev
, bar0
);
206 sdiodev
->sbwad
= bar0
;
209 *addr
&= SBSDIO_SB_OFT_ADDR_MASK
;
212 *addr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
218 brcmf_sdio_regrw_helper(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
219 void *data
, bool write
)
221 u8 func_num
, reg_size
;
226 * figure out how to read the register based on address range
227 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
228 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
229 * The rest: function 1 silicon backplane core registers
231 if ((addr
& ~REG_F0_REG_MASK
) == 0) {
232 func_num
= SDIO_FUNC_0
;
234 } else if ((addr
& ~REG_F1_MISC_MASK
) == 0) {
235 func_num
= SDIO_FUNC_1
;
238 func_num
= SDIO_FUNC_1
;
241 ret
= brcmf_sdio_addrprep(sdiodev
, reg_size
, &addr
);
248 memset(data
, 0, reg_size
);
249 if (retry
) /* wait for 1 ms till bus get settled down */
250 usleep_range(1000, 2000);
252 ret
= brcmf_sdioh_request_byte(sdiodev
, write
,
253 func_num
, addr
, data
);
255 ret
= brcmf_sdioh_request_word(sdiodev
, write
,
256 func_num
, addr
, data
, 4);
257 } while (ret
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
261 brcmf_err("failed with %d\n", ret
);
266 u8
brcmf_sdio_regrb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
271 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
272 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
273 brcmf_dbg(SDIO
, "data:0x%02x\n", data
);
281 u32
brcmf_sdio_regrl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
286 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
287 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
288 brcmf_dbg(SDIO
, "data:0x%08x\n", data
);
296 void brcmf_sdio_regwb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
301 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%02x\n", addr
, data
);
302 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
308 void brcmf_sdio_regwl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
313 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%08x\n", addr
, data
);
314 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
320 static int brcmf_sdio_buffrw(struct brcmf_sdio_dev
*sdiodev
, uint fn
,
321 bool write
, u32 addr
, struct sk_buff
*pkt
)
325 brcmf_pm_resume_wait(sdiodev
, &sdiodev
->request_buffer_wait
);
326 if (brcmf_pm_resume_error(sdiodev
))
329 /* Single skb use the standard mmc interface */
330 req_sz
= pkt
->len
+ 3;
334 return sdio_memcpy_toio(sdiodev
->func
[fn
], addr
,
338 return sdio_memcpy_fromio(sdiodev
->func
[fn
],
342 /* function 2 read is FIFO operation */
343 return sdio_readsb(sdiodev
->func
[fn
],
344 ((u8
*)(pkt
->data
)), addr
,
349 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
350 * @sdiodev: brcmfmac sdio device
351 * @fn: SDIO function number
352 * @write: direction flag
353 * @addr: dongle memory address as source/destination
356 * This function takes the responsibility as the interface function to MMC
357 * stack for block data access. It assumes that the skb passed down by the
358 * caller has already been padded and aligned.
360 static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev
*sdiodev
, uint fn
,
361 bool write
, u32 addr
,
362 struct sk_buff_head
*pktlist
)
364 unsigned int req_sz
, func_blk_sz
, sg_cnt
, sg_data_sz
, pkt_offset
;
365 unsigned int max_req_sz
, orig_offset
, dst_offset
;
366 unsigned short max_seg_cnt
, seg_sz
;
367 unsigned char *pkt_data
, *orig_data
, *dst_data
;
368 struct sk_buff
*pkt_next
= NULL
, *local_pkt_next
;
369 struct sk_buff_head local_list
, *target_list
;
370 struct mmc_request mmc_req
;
371 struct mmc_command mmc_cmd
;
372 struct mmc_data mmc_dat
;
374 struct scatterlist
*sgl
;
380 brcmf_pm_resume_wait(sdiodev
, &sdiodev
->request_buffer_wait
);
381 if (brcmf_pm_resume_error(sdiodev
))
384 target_list
= pktlist
;
385 /* for host with broken sg support, prepare a page aligned list */
386 __skb_queue_head_init(&local_list
);
387 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
389 skb_queue_walk(pktlist
, pkt_next
)
390 req_sz
+= pkt_next
->len
;
391 req_sz
= ALIGN(req_sz
, sdiodev
->func
[fn
]->cur_blksize
);
392 while (req_sz
> PAGE_SIZE
) {
393 pkt_next
= brcmu_pkt_buf_get_skb(PAGE_SIZE
);
394 if (pkt_next
== NULL
) {
398 __skb_queue_tail(&local_list
, pkt_next
);
401 pkt_next
= brcmu_pkt_buf_get_skb(req_sz
);
402 if (pkt_next
== NULL
) {
406 __skb_queue_tail(&local_list
, pkt_next
);
407 target_list
= &local_list
;
410 func_blk_sz
= sdiodev
->func
[fn
]->cur_blksize
;
411 max_req_sz
= sdiodev
->max_request_size
;
412 max_seg_cnt
= min_t(unsigned short, sdiodev
->max_segment_count
,
414 seg_sz
= target_list
->qlen
;
416 pkt_next
= target_list
->next
;
418 if (sg_alloc_table(&st
, max_seg_cnt
, GFP_KERNEL
)) {
423 memset(&mmc_req
, 0, sizeof(struct mmc_request
));
424 memset(&mmc_cmd
, 0, sizeof(struct mmc_command
));
425 memset(&mmc_dat
, 0, sizeof(struct mmc_data
));
428 mmc_dat
.blksz
= func_blk_sz
;
429 mmc_dat
.flags
= write
? MMC_DATA_WRITE
: MMC_DATA_READ
;
430 mmc_cmd
.opcode
= SD_IO_RW_EXTENDED
;
431 mmc_cmd
.arg
= write
? 1<<31 : 0; /* write flag */
432 mmc_cmd
.arg
|= (fn
& 0x7) << 28; /* SDIO func num */
433 mmc_cmd
.arg
|= 1<<27; /* block mode */
434 /* for function 1 the addr will be incremented */
435 mmc_cmd
.arg
|= (fn
== 1) ? 1<<26 : 0;
436 mmc_cmd
.flags
= MMC_RSP_SPI_R5
| MMC_RSP_R5
| MMC_CMD_ADTC
;
437 mmc_req
.cmd
= &mmc_cmd
;
438 mmc_req
.data
= &mmc_dat
;
445 while (pkt_next
!= (struct sk_buff
*)target_list
) {
446 pkt_data
= pkt_next
->data
+ pkt_offset
;
447 sg_data_sz
= pkt_next
->len
- pkt_offset
;
448 if (sg_data_sz
> sdiodev
->max_segment_size
)
449 sg_data_sz
= sdiodev
->max_segment_size
;
450 if (sg_data_sz
> max_req_sz
- req_sz
)
451 sg_data_sz
= max_req_sz
- req_sz
;
453 sg_set_buf(sgl
, pkt_data
, sg_data_sz
);
457 req_sz
+= sg_data_sz
;
458 pkt_offset
+= sg_data_sz
;
459 if (pkt_offset
== pkt_next
->len
) {
461 pkt_next
= pkt_next
->next
;
464 if (req_sz
>= max_req_sz
|| sg_cnt
>= max_seg_cnt
)
469 if (req_sz
% func_blk_sz
!= 0) {
470 brcmf_err("sg request length %u is not %u aligned\n",
471 req_sz
, func_blk_sz
);
476 mmc_dat
.sg_len
= sg_cnt
;
477 mmc_dat
.blocks
= req_sz
/ func_blk_sz
;
478 mmc_cmd
.arg
|= (addr
& 0x1FFFF) << 9; /* address */
479 mmc_cmd
.arg
|= mmc_dat
.blocks
& 0x1FF; /* block count */
480 /* incrementing addr for function 1 */
484 mmc_set_data_timeout(&mmc_dat
, sdiodev
->func
[fn
]->card
);
485 mmc_wait_for_req(sdiodev
->func
[fn
]->card
->host
, &mmc_req
);
487 ret
= mmc_cmd
.error
? mmc_cmd
.error
: mmc_dat
.error
;
489 brcmf_err("CMD53 sg block %s failed %d\n",
490 write
? "write" : "read", ret
);
496 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
497 local_pkt_next
= local_list
.next
;
499 skb_queue_walk(pktlist
, pkt_next
) {
502 req_sz
= local_pkt_next
->len
- orig_offset
;
503 req_sz
= min_t(uint
, pkt_next
->len
- dst_offset
,
505 orig_data
= local_pkt_next
->data
+ orig_offset
;
506 dst_data
= pkt_next
->data
+ dst_offset
;
507 memcpy(dst_data
, orig_data
, req_sz
);
508 orig_offset
+= req_sz
;
509 dst_offset
+= req_sz
;
510 if (orig_offset
== local_pkt_next
->len
) {
512 local_pkt_next
= local_pkt_next
->next
;
514 if (dst_offset
== pkt_next
->len
)
516 } while (!skb_queue_empty(&local_list
));
522 while ((pkt_next
= __skb_dequeue(&local_list
)) != NULL
)
523 brcmu_pkt_buf_free_skb(pkt_next
);
529 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
530 uint flags
, u8
*buf
, uint nbytes
)
532 struct sk_buff
*mypkt
;
535 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
537 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
542 err
= brcmf_sdcard_recv_pkt(sdiodev
, addr
, fn
, flags
, mypkt
);
544 memcpy(buf
, mypkt
->data
, nbytes
);
546 brcmu_pkt_buf_free_skb(mypkt
);
551 brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
552 uint flags
, struct sk_buff
*pkt
)
557 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
560 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
561 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
565 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, pkt
);
571 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
572 uint flags
, struct sk_buff_head
*pktq
, uint totlen
)
574 struct sk_buff
*glom_skb
;
579 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
580 fn
, addr
, pktq
->qlen
);
582 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
583 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
588 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, pktq
->next
);
589 else if (!sdiodev
->sg_support
) {
590 glom_skb
= brcmu_pkt_buf_get_skb(totlen
);
593 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, glom_skb
);
597 skb_queue_walk(pktq
, skb
) {
598 memcpy(skb
->data
, glom_skb
->data
, skb
->len
);
599 skb_pull(glom_skb
, skb
->len
);
602 err
= brcmf_sdio_sglist_rw(sdiodev
, fn
, false, addr
, pktq
);
609 brcmf_sdcard_send_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
610 uint flags
, u8
*buf
, uint nbytes
)
612 struct sk_buff
*mypkt
;
616 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
618 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
623 memcpy(mypkt
->data
, buf
, nbytes
);
625 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
626 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
629 err
= brcmf_sdio_buffrw(sdiodev
, fn
, true, addr
, mypkt
);
631 brcmu_pkt_buf_free_skb(mypkt
);
637 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
638 uint flags
, struct sk_buff_head
*pktq
)
644 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
645 fn
, addr
, pktq
->qlen
);
647 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
648 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
652 if (pktq
->qlen
== 1 || !sdiodev
->sg_support
)
653 skb_queue_walk(pktq
, skb
) {
654 err
= brcmf_sdio_buffrw(sdiodev
, fn
, true, addr
, skb
);
659 err
= brcmf_sdio_sglist_rw(sdiodev
, fn
, true, addr
, pktq
);
665 brcmf_sdio_ramrw(struct brcmf_sdio_dev
*sdiodev
, bool write
, u32 address
,
673 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
674 pkt
= dev_alloc_skb(dsize
);
676 brcmf_err("dev_alloc_skb failed: len %d\n", dsize
);
681 /* Determine initial transfer parameters */
682 sdaddr
= address
& SBSDIO_SB_OFT_ADDR_MASK
;
683 if ((sdaddr
+ size
) & SBSDIO_SBWINDOW_MASK
)
684 dsize
= (SBSDIO_SB_OFT_ADDR_LIMIT
- sdaddr
);
688 sdio_claim_host(sdiodev
->func
[1]);
690 /* Do the transfer(s) */
692 /* Set the backplane window to include the start address */
693 bcmerror
= brcmf_sdcard_set_sbaddr_window(sdiodev
, address
);
697 brcmf_dbg(SDIO
, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
698 write
? "write" : "read", dsize
,
699 sdaddr
, address
& SBSDIO_SBWINDOW_MASK
);
701 sdaddr
&= SBSDIO_SB_OFT_ADDR_MASK
;
702 sdaddr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
706 memcpy(pkt
->data
, data
, dsize
);
707 bcmerror
= brcmf_sdio_buffrw(sdiodev
, SDIO_FUNC_1
, write
,
710 brcmf_err("membytes transfer failed\n");
714 memcpy(data
, pkt
->data
, dsize
);
715 skb_trim(pkt
, dsize
);
717 /* Adjust for next transfer (if any) */
723 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
729 /* Return the window to backplane enumeration space for core access */
730 if (brcmf_sdcard_set_sbaddr_window(sdiodev
, sdiodev
->sbwad
))
731 brcmf_err("FAILED to set window back to 0x%x\n",
734 sdio_release_host(sdiodev
->func
[1]);
739 int brcmf_sdcard_abort(struct brcmf_sdio_dev
*sdiodev
, uint fn
)
741 char t_func
= (char)fn
;
742 brcmf_dbg(SDIO
, "Enter\n");
744 /* issue abort cmd52 command through F0 */
745 brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
, SDIO_FUNC_0
,
746 SDIO_CCCR_ABORT
, &t_func
);
748 brcmf_dbg(SDIO
, "Exit\n");
752 int brcmf_sdio_probe(struct brcmf_sdio_dev
*sdiodev
)
757 ret
= brcmf_sdioh_attach(sdiodev
);
763 /* try to attach to the target device */
764 sdiodev
->bus
= brcmf_sdbrcm_probe(regs
, sdiodev
);
766 brcmf_err("device attach failed\n");
773 brcmf_sdio_remove(sdiodev
);
777 EXPORT_SYMBOL(brcmf_sdio_probe
);
779 int brcmf_sdio_remove(struct brcmf_sdio_dev
*sdiodev
)
781 sdiodev
->bus_if
->state
= BRCMF_BUS_DOWN
;
784 brcmf_sdbrcm_disconnect(sdiodev
->bus
);
788 brcmf_sdioh_detach(sdiodev
);
794 EXPORT_SYMBOL(brcmf_sdio_remove
);
796 void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev
*sdiodev
, bool enable
)
799 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, BRCMF_WD_POLL_MS
);
801 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, 0);