/*
2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
16 /* ****************** SDIO CARD Interface Functions **************************/
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/pci.h>
21 #include <linux/pci_ids.h>
22 #include <linux/sched.h>
23 #include <linux/completion.h>
24 #include <linux/scatterlist.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/platform_data/brcmfmac-sdio.h>
31 #include <brcm_hw_ids.h>
32 #include <brcmu_utils.h>
33 #include <brcmu_wifi.h>
37 #include "sdio_host.h"
39 #define SDIOH_API_ACCESS_RETRY_LIMIT 2
/* Out-of-band (GPIO) interrupt handler requested in
 * brcmf_sdio_intr_register(); dev_id is &sdiodev->func[1]->dev, whose
 * drvdata is the brcmf_bus.
 * NOTE(review): this extract is missing some original lines (braces and
 * the final return, presumably "return IRQ_HANDLED;") - confirm against
 * the full source.
 */
42 static irqreturn_t
brcmf_sdio_oob_irqhandler(int irq
, void *dev_id
)
44 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev_id
);
45 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
47 brcmf_dbg(INTR
, "OOB intr triggered\n");
49 /* out-of-band interrupt is level-triggered which won't
50 * be cleared until dpc
*/
/* Mask the level-triggered line until the dpc services it; irq_en
 * tracks the masked state (written under irq_en_lock elsewhere).
 */
52 if (sdiodev
->irq_en
) {
53 disable_irq_nosync(irq
);
54 sdiodev
->irq_en
= false;
/* Hand processing off to the SDIO bus dpc. */
57 brcmf_sdbrcm_isr(sdiodev
->bus
);
/* In-band SDIO interrupt handler for function 1, registered via
 * sdio_claim_irq() in brcmf_sdio_intr_register().  Recovers the
 * brcmf_sdio_dev from the function's device drvdata and kicks the
 * bus dpc.
 */
62 static void brcmf_sdio_ib_irqhandler(struct sdio_func
*func
)
64 struct brcmf_bus
*bus_if
= dev_get_drvdata(&func
->dev
);
65 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
67 brcmf_dbg(INTR
, "IB intr triggered\n");
/* All servicing happens in the dpc. */
69 brcmf_sdbrcm_isr(sdiodev
->bus
);
72 /* dummy handler for SDIO function 2 interrupt */
/* Intentionally empty body.  NOTE(review): presumably claimed only so
 * the MMC core enables func 2 interrupts while func 1's handler does
 * the real work - confirm against the full source.
 */
73 static void brcmf_sdio_dummy_irqhandler(struct sdio_func
*func
)
/* Hook up the dongle interrupt: either a dedicated out-of-band GPIO IRQ
 * described by platform data, or the regular in-band SDIO interrupt.
 * NOTE(review): several original lines (declarations of ret/data/flags,
 * error-return paths, the closing "} else {" and returns) are missing
 * from this extract - restore from the full source before compiling.
 */
77 int brcmf_sdio_intr_register(struct brcmf_sdio_dev
*sdiodev
)
/* OOB path: platform data advertises a dedicated interrupt line. */
83 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
84 brcmf_dbg(SDIO
, "Enter, register OOB IRQ %d\n",
85 sdiodev
->pdata
->oob_irq_nr
);
86 ret
= request_irq(sdiodev
->pdata
->oob_irq_nr
,
87 brcmf_sdio_oob_irqhandler
,
88 sdiodev
->pdata
->oob_irq_flags
,
90 &sdiodev
->func
[1]->dev
);
92 brcmf_err("request_irq failed %d\n", ret
);
95 sdiodev
->oob_irq_requested
= true;
/* irq_en mirrors whether the OOB line is currently unmasked; the
 * handler clears it under this lock when it masks the IRQ.
 */
96 spin_lock_init(&sdiodev
->irq_en_lock
);
97 spin_lock_irqsave(&sdiodev
->irq_en_lock
, flags
);
98 sdiodev
->irq_en
= true;
99 spin_unlock_irqrestore(&sdiodev
->irq_en_lock
, flags
);
/* Allow the OOB IRQ to wake the system from suspend. */
101 ret
= enable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
103 brcmf_err("enable_irq_wake failed %d\n", ret
);
106 sdiodev
->irq_wake
= true;
108 sdio_claim_host(sdiodev
->func
[1]);
110 /* must configure SDIO_CCCR_IENx to enable irq */
111 data
= brcmf_sdio_regrb(sdiodev
, SDIO_CCCR_IENx
, &ret
);
/* Enable func 1, func 2 and (bit 0) the master interrupt enable.
 * NOTE(review): bit 0 is presumably CCCR IENM - confirm vs. SDIO spec.
 */
112 data
|= 1 << SDIO_FUNC_1
| 1 << SDIO_FUNC_2
| 1;
113 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, data
, &ret
);
115 /* redirect, configure and enable io for interrupt signal */
116 data
= SDIO_SEPINT_MASK
| SDIO_SEPINT_OE
;
117 if (sdiodev
->pdata
->oob_irq_flags
& IRQF_TRIGGER_HIGH
)
118 data
|= SDIO_SEPINT_ACT_HI
;
119 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, data
, &ret
);
121 sdio_release_host(sdiodev
->func
[1]);
/* In-band path: use the standard MMC-core SDIO interrupt machinery
 * (else-branch line of the original is not visible in this extract).
 */
123 brcmf_dbg(SDIO
, "Entering\n");
124 sdio_claim_host(sdiodev
->func
[1]);
125 sdio_claim_irq(sdiodev
->func
[1], brcmf_sdio_ib_irqhandler
);
126 sdio_claim_irq(sdiodev
->func
[2], brcmf_sdio_dummy_irqhandler
);
127 sdio_release_host(sdiodev
->func
[1]);
/* Undo brcmf_sdio_intr_register(): disable the interrupt at CCCR level,
 * free the OOB IRQ (if one was requested), or release the in-band SDIO
 * IRQs.  NOTE(review): closing braces/else of the original are missing
 * from this extract.
 */
133 int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev
*sdiodev
)
135 brcmf_dbg(SDIO
, "Entering\n");
137 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
/* Turn off the Broadcom SEPINT redirect and all CCCR interrupt
 * enables; errors are deliberately ignored (NULL ret pointer).
 */
138 sdio_claim_host(sdiodev
->func
[1]);
139 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, 0, NULL
);
140 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, 0, NULL
);
141 sdio_release_host(sdiodev
->func
[1]);
143 if (sdiodev
->oob_irq_requested
) {
144 sdiodev
->oob_irq_requested
= false;
145 if (sdiodev
->irq_wake
) {
146 disable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
147 sdiodev
->irq_wake
= false;
149 free_irq(sdiodev
->pdata
->oob_irq_nr
,
150 &sdiodev
->func
[1]->dev
);
151 sdiodev
->irq_en
= false;
/* In-band case: release both claimed function IRQs. */
154 sdio_claim_host(sdiodev
->func
[1]);
155 sdio_release_irq(sdiodev
->func
[2]);
156 sdio_release_irq(sdiodev
->func
[1]);
157 sdio_release_host(sdiodev
->func
[1]);
/* Program the silicon-backplane address window: write the low/mid/high
 * window bytes (SBSDIO_FUNC1_SBADDRLOW + i) on function 1 so subsequent
 * func-1 accesses land in the 32 KiB window containing 'address'.  Each
 * byte write is retried up to SDIOH_API_ACCESS_RETRY_LIMIT times.
 * NOTE(review): the declarations of addr[]/i/err/retry and the do{ line
 * of the retry loop are missing from this extract.
 */
164 brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev
*sdiodev
, u32 address
)
170 addr
[0] = (address
>> 8) & SBSDIO_SBADDRLOW_MASK
;
171 addr
[1] = (address
>> 16) & SBSDIO_SBADDRMID_MASK
;
172 addr
[2] = (address
>> 24) & SBSDIO_SBADDRHIGH_MASK
;
174 for (i
= 0; i
< 3; i
++) {
/* On retry, give the bus ~1ms to settle before the next attempt. */
178 usleep_range(1000, 2000);
179 err
= brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
,
180 SDIO_FUNC_1
, SBSDIO_FUNC1_SBADDRLOW
+ i
,
182 } while (err
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
185 brcmf_err("failed at addr:0x%0x\n",
186 SBSDIO_FUNC1_SBADDRLOW
+ i
);
/* Prepare a backplane address for a func-1 access: move the window if
 * the upper bits (bar0) differ from the cached sdiodev->sbwad, then
 * reduce *addr to a window offset.  The 4-byte access flag is OR'd in
 * (the original "if (width == 4)" guard is not visible in this extract
 * - confirm against the full source).
 */
195 brcmf_sdio_addrprep(struct brcmf_sdio_dev
*sdiodev
, uint width
, u32
*addr
)
197 uint bar0
= *addr
& ~SBSDIO_SB_OFT_ADDR_MASK
;
/* Only reprogram the window registers when it actually moved. */
200 if (bar0
!= sdiodev
->sbwad
) {
201 err
= brcmf_sdcard_set_sbaddr_window(sdiodev
, bar0
);
/* Cache the active window base for the next call. */
205 sdiodev
->sbwad
= bar0
;
208 *addr
&= SBSDIO_SB_OFT_ADDR_MASK
;
211 *addr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
/* Common register read/write helper used by the regrb/regrl/regwb/regwl
 * wrappers: pick the SDIO function and access size from the address
 * range, prepare the backplane window, then retry the byte/word access
 * up to SDIOH_API_ACCESS_RETRY_LIMIT times.
 * NOTE(review): declarations of ret/retry, the reg_size assignments,
 * the "do {" line and the read/write branch structure are missing from
 * this extract.
 */
217 brcmf_sdio_regrw_helper(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
218 void *data
, bool write
)
220 u8 func_num
, reg_size
;
/*
225 * figure out how to read the register based on address range
226 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
227 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
228 * The rest: function 1 silicon backplane core registers
*/
230 if ((addr
& ~REG_F0_REG_MASK
) == 0) {
231 func_num
= SDIO_FUNC_0
;
233 } else if ((addr
& ~REG_F1_MISC_MASK
) == 0) {
234 func_num
= SDIO_FUNC_1
;
237 func_num
= SDIO_FUNC_1
;
/* Backplane core access: translate addr into the current window. */
240 ret
= brcmf_sdio_addrprep(sdiodev
, reg_size
, &addr
);
/* On a read, pre-zero the caller's buffer so a failed transfer does
 * not leave stale data behind.
 */
247 memset(data
, 0, reg_size
);
248 if (retry
) /* wait for 1 ms till bus get settled down */
249 usleep_range(1000, 2000);
251 ret
= brcmf_sdioh_request_byte(sdiodev
, write
,
252 func_num
, addr
, data
);
254 ret
= brcmf_sdioh_request_word(sdiodev
, write
,
255 func_num
, addr
, data
, 4);
256 } while (ret
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
260 brcmf_err("failed with %d\n", ret
);
/* Read a single byte register via brcmf_sdio_regrw_helper().  The
 * status lands in *ret when the caller passes a non-NULL pointer
 * (that store and the return of 'data' are not visible in this
 * extract).
 */
265 u8
brcmf_sdio_regrb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
270 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
271 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
272 brcmf_dbg(SDIO
, "data:0x%02x\n", data
);
/* Read a 32-bit register via brcmf_sdio_regrw_helper(); 4-byte variant
 * of brcmf_sdio_regrb() (the *ret store and return are not visible in
 * this extract).
 */
280 u32
brcmf_sdio_regrl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
285 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
286 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
287 brcmf_dbg(SDIO
, "data:0x%08x\n", data
);
/* Write a single byte register via brcmf_sdio_regrw_helper() (the
 * data/ret parameters and the *ret store are on lines not visible in
 * this extract).
 */
295 void brcmf_sdio_regwb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
300 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%02x\n", addr
, data
);
301 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
/* Write a 32-bit register via brcmf_sdio_regrw_helper(); 4-byte variant
 * of brcmf_sdio_regwb() (remaining parameters and the *ret store are on
 * lines not visible in this extract).
 */
307 void brcmf_sdio_regwl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
312 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%08x\n", addr
, data
);
313 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
/* Transfer a single skb to/from the dongle using the standard MMC-core
 * block I/O helpers: sdio_memcpy_toio() for writes, sdio_memcpy_fromio()
 * for func-1 reads, sdio_readsb() for func-2 reads (FIFO).
 * NOTE(review): the branch conditions and trailing call arguments sit
 * on lines missing from this extract.
 */
319 static int brcmf_sdio_buffrw(struct brcmf_sdio_dev
*sdiodev
, uint fn
,
320 bool write
, u32 addr
, struct sk_buff
*pkt
)
/* Block until a pending suspend/resume has completed. */
324 brcmf_pm_resume_wait(sdiodev
, &sdiodev
->request_buffer_wait
);
325 if (brcmf_pm_resume_error(sdiodev
))
328 /* Single skb use the standard mmc interface */
/* Round the request up; "+ 3" feeds a 4-byte alignment (the rounding
 * expression itself is on a line not visible here).
 */
329 req_sz
= pkt
->len
+ 3;
333 return sdio_memcpy_toio(sdiodev
->func
[fn
], addr
,
337 return sdio_memcpy_fromio(sdiodev
->func
[fn
],
341 /* function 2 read is FIFO operation */
342 return sdio_readsb(sdiodev
->func
[fn
],
343 ((u8
*)(pkt
->data
)), addr
,
/**
348 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
349 * @sdiodev: brcmfmac sdio device
350 * @fn: SDIO function number
351 * @write: direction flag
352 * @addr: dongle memory address as source/destination
 * @pktlist: skb queue holding the data to transfer
355 * This function takes the responsibility as the interface function to MMC
356 * stack for block data access. It assumes that the skb passed down by the
357 * caller has already been padded and aligned.
 *
 * NOTE(review): numerous original lines (declarations of st/ret/retries,
 * loop headers, sg iteration, error paths, cleanup) are missing from
 * this extract - restore from the full source before compiling.
 */
359 static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev
*sdiodev
, uint fn
,
360 bool write
, u32 addr
,
361 struct sk_buff_head
*pktlist
)
363 unsigned int req_sz
, func_blk_sz
, sg_cnt
, sg_data_sz
, pkt_offset
;
364 unsigned int max_req_sz
, orig_offset
, dst_offset
;
365 unsigned short max_seg_cnt
, seg_sz
;
366 unsigned char *pkt_data
, *orig_data
, *dst_data
;
367 struct sk_buff
*pkt_next
= NULL
, *local_pkt_next
;
368 struct sk_buff_head local_list
, *target_list
;
369 struct mmc_request mmc_req
;
370 struct mmc_command mmc_cmd
;
371 struct mmc_data mmc_dat
;
373 struct scatterlist
*sgl
;
/* Wait out any in-flight suspend/resume before touching the bus. */
379 brcmf_pm_resume_wait(sdiodev
, &sdiodev
->request_buffer_wait
);
380 if (brcmf_pm_resume_error(sdiodev
))
383 target_list
= pktlist
;
384 /* for host with broken sg support, prepare a page aligned list */
385 __skb_queue_head_init(&local_list
);
/* Broken-sg read path: build a bounce list of page-sized skbs large
 * enough for the whole (block-aligned) request; data is copied back to
 * the caller's list after the transfer.
 */
386 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
388 skb_queue_walk(pktlist
, pkt_next
)
389 req_sz
+= pkt_next
->len
;
390 req_sz
= ALIGN(req_sz
, sdiodev
->func
[fn
]->cur_blksize
);
391 while (req_sz
> PAGE_SIZE
) {
392 pkt_next
= brcmu_pkt_buf_get_skb(PAGE_SIZE
);
393 if (pkt_next
== NULL
) {
397 __skb_queue_tail(&local_list
, pkt_next
);
400 pkt_next
= brcmu_pkt_buf_get_skb(req_sz
);
401 if (pkt_next
== NULL
) {
405 __skb_queue_tail(&local_list
, pkt_next
);
406 target_list
= &local_list
;
/* Host controller limits bound each CMD53: request size and
 * scatter-gather segment count.
 */
409 func_blk_sz
= sdiodev
->func
[fn
]->cur_blksize
;
410 max_req_sz
= sdiodev
->max_request_size
;
411 max_seg_cnt
= min_t(unsigned short, sdiodev
->max_segment_count
,
413 seg_sz
= target_list
->qlen
;
415 pkt_next
= target_list
->next
;
417 if (sg_alloc_table(&st
, max_seg_cnt
, GFP_KERNEL
)) {
/* Hand-built CMD53 (SD_IO_RW_EXTENDED) request submitted straight to
 * the MMC host so one command can cover the whole scatter list.
 */
422 memset(&mmc_req
, 0, sizeof(struct mmc_request
));
423 memset(&mmc_cmd
, 0, sizeof(struct mmc_command
));
424 memset(&mmc_dat
, 0, sizeof(struct mmc_data
));
427 mmc_dat
.blksz
= func_blk_sz
;
428 mmc_dat
.flags
= write
? MMC_DATA_WRITE
: MMC_DATA_READ
;
429 mmc_cmd
.opcode
= SD_IO_RW_EXTENDED
;
430 mmc_cmd
.arg
= write
? 1<<31 : 0; /* write flag */
431 mmc_cmd
.arg
|= (fn
& 0x7) << 28; /* SDIO func num */
432 mmc_cmd
.arg
|= 1<<27; /* block mode */
433 /* for function 1 the addr will be incremented */
434 mmc_cmd
.arg
|= (fn
== 1) ? 1<<26 : 0;
435 mmc_cmd
.flags
= MMC_RSP_SPI_R5
| MMC_RSP_R5
| MMC_CMD_ADTC
;
436 mmc_req
.cmd
= &mmc_cmd
;
437 mmc_req
.data
= &mmc_dat
;
/* Fill scatterlist entries from the skbs, clamping each segment to
 * the host's max segment size and the remaining request budget.
 */
444 while (pkt_next
!= (struct sk_buff
*)target_list
) {
445 pkt_data
= pkt_next
->data
+ pkt_offset
;
446 sg_data_sz
= pkt_next
->len
- pkt_offset
;
447 if (sg_data_sz
> sdiodev
->max_segment_size
)
448 sg_data_sz
= sdiodev
->max_segment_size
;
449 if (sg_data_sz
> max_req_sz
- req_sz
)
450 sg_data_sz
= max_req_sz
- req_sz
;
452 sg_set_buf(sgl
, pkt_data
, sg_data_sz
);
456 req_sz
+= sg_data_sz
;
457 pkt_offset
+= sg_data_sz
;
458 if (pkt_offset
== pkt_next
->len
) {
460 pkt_next
= pkt_next
->next
;
463 if (req_sz
>= max_req_sz
|| sg_cnt
>= max_seg_cnt
)
/* CMD53 block mode requires a block-multiple length. */
468 if (req_sz
% func_blk_sz
!= 0) {
469 brcmf_err("sg request length %u is not %u aligned\n",
470 req_sz
, func_blk_sz
);
475 mmc_dat
.sg_len
= sg_cnt
;
476 mmc_dat
.blocks
= req_sz
/ func_blk_sz
;
477 mmc_cmd
.arg
|= (addr
& 0x1FFFF) << 9; /* address */
478 mmc_cmd
.arg
|= mmc_dat
.blocks
& 0x1FF; /* block count */
479 /* incrementing addr for function 1 */
/* Synchronous submit; completion status is read from the request. */
483 mmc_set_data_timeout(&mmc_dat
, sdiodev
->func
[fn
]->card
);
484 mmc_wait_for_req(sdiodev
->func
[fn
]->card
->host
, &mmc_req
);
486 ret
= mmc_cmd
.error
? mmc_cmd
.error
: mmc_dat
.error
;
488 brcmf_err("CMD53 sg block %s failed %d\n",
489 write
? "write" : "read", ret
);
/* Broken-sg read path, second half: copy the bounce-list contents
 * back into the caller's original skbs.
 */
495 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
496 local_pkt_next
= local_list
.next
;
498 skb_queue_walk(pktlist
, pkt_next
) {
501 req_sz
= local_pkt_next
->len
- orig_offset
;
502 req_sz
= min_t(uint
, pkt_next
->len
- dst_offset
,
504 orig_data
= local_pkt_next
->data
+ orig_offset
;
505 dst_data
= pkt_next
->data
+ dst_offset
;
506 memcpy(dst_data
, orig_data
, req_sz
);
507 orig_offset
+= req_sz
;
508 dst_offset
+= req_sz
;
509 if (orig_offset
== local_pkt_next
->len
) {
511 local_pkt_next
= local_pkt_next
->next
;
513 if (dst_offset
== pkt_next
->len
)
515 } while (!skb_queue_empty(&local_list
));
/* Free any bounce skbs still queued. */
521 while ((pkt_next
= __skb_dequeue(&local_list
)) != NULL
)
522 brcmu_pkt_buf_free_skb(pkt_next
);
/* Receive into a flat caller buffer: allocate a temporary skb, read via
 * brcmf_sdcard_recv_pkt(), then copy into 'buf'.  The skb is always
 * freed; err-check lines between the call and the memcpy are missing
 * from this extract.
 */
528 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
529 uint flags
, u8
*buf
, uint nbytes
)
531 struct sk_buff
*mypkt
;
534 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
536 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
541 err
= brcmf_sdcard_recv_pkt(sdiodev
, addr
, fn
, flags
, mypkt
);
543 memcpy(buf
, mypkt
->data
, nbytes
);
545 brcmu_pkt_buf_free_skb(mypkt
);
/* Receive a single skb: choose 2- or 4-byte access width from 'flags',
 * prepare the backplane address, then read via brcmf_sdio_buffrw().
 * Error handling between the two calls is on lines missing from this
 * extract.
 */
550 brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
551 uint flags
, struct sk_buff
*pkt
)
556 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
559 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
560 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
564 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, pkt
);
/* Receive a chain of skbs.  Three strategies: a single-skb queue goes
 * straight through brcmf_sdio_buffrw(); a host without sg support reads
 * the whole 'totlen' into one glom skb and scatters it into the queue;
 * otherwise the scatter-gather path brcmf_sdio_sglist_rw() is used.
 * NOTE(review): the single-skb "if" line and allocation/error checks
 * are missing from this extract.
 */
570 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
571 uint flags
, struct sk_buff_head
*pktq
, uint totlen
)
573 struct sk_buff
*glom_skb
;
578 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
579 fn
, addr
, pktq
->qlen
);
581 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
582 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
587 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, pktq
->next
);
588 else if (!sdiodev
->sg_support
) {
589 glom_skb
= brcmu_pkt_buf_get_skb(totlen
);
592 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, glom_skb
);
/* Distribute the glom buffer across the caller's skbs in order. */
596 skb_queue_walk(pktq
, skb
) {
597 memcpy(skb
->data
, glom_skb
->data
, skb
->len
);
598 skb_pull(glom_skb
, skb
->len
);
601 err
= brcmf_sdio_sglist_rw(sdiodev
, fn
, false, addr
, pktq
);
/* Send a flat caller buffer: copy 'buf' into a temporary skb, prepare
 * the address, write via brcmf_sdio_buffrw(), then free the skb.
 * Error-check lines between calls are missing from this extract.
 */
608 brcmf_sdcard_send_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
609 uint flags
, u8
*buf
, uint nbytes
)
611 struct sk_buff
*mypkt
;
615 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
617 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
622 memcpy(mypkt
->data
, buf
, nbytes
);
624 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
625 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
628 err
= brcmf_sdio_buffrw(sdiodev
, fn
, true, addr
, mypkt
);
630 brcmu_pkt_buf_free_skb(mypkt
);
/* Send a queue of skbs: one-at-a-time through brcmf_sdio_buffrw() when
 * the queue has a single entry or the host lacks sg support, otherwise
 * in one scatter-gather request via brcmf_sdio_sglist_rw().
 * NOTE(review): error checks and the else line are missing from this
 * extract.
 */
636 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
637 uint flags
, struct sk_buff_head
*pktq
)
643 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
644 fn
, addr
, pktq
->qlen
);
646 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
647 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
651 if (pktq
->qlen
== 1 || !sdiodev
->sg_support
)
652 skb_queue_walk(pktq
, skb
) {
653 err
= brcmf_sdio_buffrw(sdiodev
, fn
, true, addr
, skb
);
658 err
= brcmf_sdio_sglist_rw(sdiodev
, fn
, true, addr
, pktq
);
/* Read or write dongle RAM through the func-1 backplane window, in
 * chunks no larger than the window (SBSDIO_SB_OFT_ADDR_LIMIT), moving
 * the window between chunks and restoring sdiodev->sbwad at the end.
 * NOTE(review): the remaining parameters (data/size), loop structure,
 * error paths and cleanup are on lines missing from this extract.
 */
664 brcmf_sdio_ramrw(struct brcmf_sdio_dev
*sdiodev
, bool write
, u32 address
,
/* One bounce skb sized for the largest chunk. */
672 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
673 pkt
= dev_alloc_skb(dsize
);
675 brcmf_err("dev_alloc_skb failed: len %d\n", dsize
);
680 /* Determine initial transfer parameters */
681 sdaddr
= address
& SBSDIO_SB_OFT_ADDR_MASK
;
/* First chunk must not cross a window boundary. */
682 if ((sdaddr
+ size
) & SBSDIO_SBWINDOW_MASK
)
683 dsize
= (SBSDIO_SB_OFT_ADDR_LIMIT
- sdaddr
);
687 sdio_claim_host(sdiodev
->func
[1]);
689 /* Do the transfer(s) */
691 /* Set the backplane window to include the start address */
692 bcmerror
= brcmf_sdcard_set_sbaddr_window(sdiodev
, address
);
696 brcmf_dbg(SDIO
, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
697 write
? "write" : "read", dsize
,
698 sdaddr
, address
& SBSDIO_SBWINDOW_MASK
);
700 sdaddr
&= SBSDIO_SB_OFT_ADDR_MASK
;
701 sdaddr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
/* Stage data through the bounce skb in both directions. */
705 memcpy(pkt
->data
, data
, dsize
);
706 bcmerror
= brcmf_sdio_buffrw(sdiodev
, SDIO_FUNC_1
, write
,
709 brcmf_err("membytes transfer failed\n");
713 memcpy(data
, pkt
->data
, dsize
);
714 skb_trim(pkt
, dsize
);
716 /* Adjust for next transfer (if any) */
722 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
728 /* Return the window to backplane enumeration space for core access */
729 if (brcmf_sdcard_set_sbaddr_window(sdiodev
, sdiodev
->sbwad
))
730 brcmf_err("FAILED to set window back to 0x%x\n",
733 sdio_release_host(sdiodev
->func
[1]);
/* Abort an in-progress transfer on function 'fn' by writing the
 * function number to the CCCR abort register (CMD52 through func 0).
 * The return statement is on a line missing from this extract.
 */
738 int brcmf_sdcard_abort(struct brcmf_sdio_dev
*sdiodev
, uint fn
)
740 char t_func
= (char)fn
;
741 brcmf_dbg(SDIO
, "Enter\n");
743 /* issue abort cmd52 command through F0 */
744 brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
, SDIO_FUNC_0
,
745 SDIO_CCCR_ABORT
, &t_func
);
747 brcmf_dbg(SDIO
, "Exit\n");
/* Bring up the SDIO device: attach the host layer, then probe the bus
 * layer (brcmf_sdbrcm_probe) and unwind via brcmf_sdio_remove() on
 * failure.  NOTE(review): declarations (ret/regs), error checks and
 * returns are on lines missing from this extract.
 */
751 int brcmf_sdio_probe(struct brcmf_sdio_dev
*sdiodev
)
756 ret
= brcmf_sdioh_attach(sdiodev
);
762 /* try to attach to the target device */
763 sdiodev
->bus
= brcmf_sdbrcm_probe(regs
, sdiodev
);
765 brcmf_err("device attach failed\n");
/* Error unwind: tear down whatever was attached. */
772 brcmf_sdio_remove(sdiodev
);
/* Tear down in reverse of brcmf_sdio_probe(): mark the bus down,
 * disconnect the bus layer, detach the host layer.  Guard conditions
 * and the return are on lines missing from this extract.
 */
777 int brcmf_sdio_remove(struct brcmf_sdio_dev
*sdiodev
)
779 sdiodev
->bus_if
->state
= BRCMF_BUS_DOWN
;
782 brcmf_sdbrcm_disconnect(sdiodev
->bus
);
786 brcmf_sdioh_detach(sdiodev
);
/* Start (poll interval BRCMF_WD_POLL_MS) or stop (interval 0) the bus
 * watchdog timer.  The if/else around the two calls is on lines missing
 * from this extract.
 */
793 void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev
*sdiodev
, bool enable
)
796 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, BRCMF_WD_POLL_MS
);
798 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, 0);