Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[deliverable/linux.git] / drivers / net / wireless / brcm80211 / brcmfmac / bcmsdh.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16 /* ****************** SDIO CARD Interface Functions **************************/
17
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/export.h>
21 #include <linux/pci.h>
22 #include <linux/pci_ids.h>
23 #include <linux/sched.h>
24 #include <linux/completion.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mmc/sdio.h>
27 #include <linux/mmc/sdio_func.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/platform_data/brcmfmac-sdio.h>
31
32 #include <defs.h>
33 #include <brcm_hw_ids.h>
34 #include <brcmu_utils.h>
35 #include <brcmu_wifi.h>
36 #include <soc.h>
37 #include "dhd_bus.h"
38 #include "dhd_dbg.h"
39 #include "sdio_host.h"
40
41 #define SDIOH_API_ACCESS_RETRY_LIMIT 2
42
43
/* Handler for the platform out-of-band interrupt line.
 * @dev_id is &sdiodev->func[1]->dev, as passed to request_irq() in
 * brcmf_sdio_intr_register(); its drvdata holds the brcmf_bus.
 */
static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdbrcm_isr(sdiodev->bus);

	return IRQ_HANDLED;
}
63
64 static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
65 {
66 struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
67 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
68
69 brcmf_dbg(INTR, "IB intr triggered\n");
70
71 brcmf_sdbrcm_isr(sdiodev->bus);
72 }
73
/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
	/* intentionally empty: events are serviced through the function 1
	 * handler; this is only installed so func 2 interrupts are claimed
	 * (see brcmf_sdio_intr_register())
	 */
}
78
79 int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
80 {
81 int ret = 0;
82 u8 data;
83 unsigned long flags;
84
85 if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
86 brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
87 sdiodev->pdata->oob_irq_nr);
88 ret = request_irq(sdiodev->pdata->oob_irq_nr,
89 brcmf_sdio_oob_irqhandler,
90 sdiodev->pdata->oob_irq_flags,
91 "brcmf_oob_intr",
92 &sdiodev->func[1]->dev);
93 if (ret != 0) {
94 brcmf_err("request_irq failed %d\n", ret);
95 return ret;
96 }
97 sdiodev->oob_irq_requested = true;
98 spin_lock_init(&sdiodev->irq_en_lock);
99 spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
100 sdiodev->irq_en = true;
101 spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
102
103 ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
104 if (ret != 0) {
105 brcmf_err("enable_irq_wake failed %d\n", ret);
106 return ret;
107 }
108 sdiodev->irq_wake = true;
109
110 sdio_claim_host(sdiodev->func[1]);
111
112 /* must configure SDIO_CCCR_IENx to enable irq */
113 data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
114 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
115 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
116
117 /* redirect, configure and enable io for interrupt signal */
118 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
119 if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
120 data |= SDIO_SEPINT_ACT_HI;
121 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
122
123 sdio_release_host(sdiodev->func[1]);
124 } else {
125 brcmf_dbg(SDIO, "Entering\n");
126 sdio_claim_host(sdiodev->func[1]);
127 sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
128 sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
129 sdio_release_host(sdiodev->func[1]);
130 }
131
132 return 0;
133 }
134
/* Tear down interrupt delivery set up by brcmf_sdio_intr_register().
 *
 * For the out-of-band case the card-side signal redirection is disabled
 * first, then the host irq wake flag and the irq itself are released.
 * For the in-band case the func 1/2 SDIO irqs are simply released.
 * Always returns 0.
 */
int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		/* quiesce the card before dropping the host-side irq */
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
164
/* Program the function 1 backplane address window registers.
 *
 * Writes the low/mid/high bytes of @address (bits 8..31) to the three
 * consecutive SBSDIO_FUNC1_SBADDRLOW registers.  Each byte write is
 * retried up to SDIOH_API_ACCESS_RETRY_LIMIT extra times with a 1-2 ms
 * sleep between attempts (the do/while with post-increment retry yields
 * at most SDIOH_API_ACCESS_RETRY_LIMIT + 1 attempts per byte).
 * Returns 0 on success or the last error from the byte transfer.
 */
static int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];
	s32 retry;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		retry = 0;
		do {
			if (retry)	/* let the bus settle before retry */
				usleep_range(1000, 2000);
			err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
					SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
					&addr[i]);
		} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

		if (err) {
			brcmf_err("failed at addr:0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}
195
196 static int
197 brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
198 {
199 uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
200 int err = 0;
201
202 if (bar0 != sdiodev->sbwad) {
203 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
204 if (err)
205 return err;
206
207 sdiodev->sbwad = bar0;
208 }
209
210 *addr &= SBSDIO_SB_OFT_ADDR_MASK;
211
212 if (width == 4)
213 *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
214
215 return 0;
216 }
217
/* Common register read/write path for the brcmf_sdio_reg{rb,rl,wb,wl}
 * accessors.
 *
 * @addr selects function and width (see comment below); @data points to
 * a u8 or u32 depending on the derived reg_size; @write selects the
 * direction.  Transfers are retried up to SDIOH_API_ACCESS_RETRY_LIMIT
 * extra times.  On reads, *data is cleared before every attempt so a
 * failed transfer cannot leave stale bytes behind.
 * Returns 0 on success or the last transfer error.
 */
int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
			void *data, bool write)
{
	u8 func_num, reg_size;
	s32 retry = 0;
	int ret;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0) {
		func_num = SDIO_FUNC_0;
		reg_size = 1;
	} else if ((addr & ~REG_F1_MISC_MASK) == 0) {
		func_num = SDIO_FUNC_1;
		reg_size = 1;
	} else {
		func_num = SDIO_FUNC_1;
		reg_size = 4;

		/* backplane access: window must be set, addr rewritten */
		brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
	}

	do {
		if (!write)
			memset(data, 0, reg_size);
		if (retry) /* wait for 1 ms till bus get settled down */
			usleep_range(1000, 2000);
		if (reg_size == 1)
			ret = brcmf_sdioh_request_byte(sdiodev, write,
						       func_num, addr, data);
		else
			ret = brcmf_sdioh_request_word(sdiodev, write,
						       func_num, addr, data, 4);
	} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret != 0)
		brcmf_err("failed with %d\n", ret);

	return ret;
}
263
264 u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
265 {
266 u8 data;
267 int retval;
268
269 brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
270 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
271 brcmf_dbg(SDIO, "data:0x%02x\n", data);
272
273 if (ret)
274 *ret = retval;
275
276 return data;
277 }
278
279 u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
280 {
281 u32 data;
282 int retval;
283
284 brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
285 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
286 brcmf_dbg(SDIO, "data:0x%08x\n", data);
287
288 if (ret)
289 *ret = retval;
290
291 return data;
292 }
293
294 void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
295 u8 data, int *ret)
296 {
297 int retval;
298
299 brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
300 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
301
302 if (ret)
303 *ret = retval;
304 }
305
306 void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
307 u32 data, int *ret)
308 {
309 int retval;
310
311 brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
312 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
313
314 if (ret)
315 *ret = retval;
316 }
317
318 /**
319 * brcmf_sdio_buffrw - SDIO interface function for block data access
320 * @sdiodev: brcmfmac sdio device
321 * @fn: SDIO function number
322 * @write: direction flag
323 * @addr: dongle memory address as source/destination
324 * @pkt: skb pointer
325 *
326 * This function takes the respbonsibility as the interface function to MMC
327 * stack for block data access. It assumes that the skb passed down by the
328 * caller has already been padded and aligned.
329 */
330 static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
331 bool write, u32 addr, struct sk_buff_head *pktlist)
332 {
333 unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
334 unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
335 unsigned short max_seg_sz, seg_sz;
336 unsigned char *pkt_data, *orig_data, *dst_data;
337 struct sk_buff *pkt_next = NULL, *local_pkt_next;
338 struct sk_buff_head local_list, *target_list;
339 struct mmc_request mmc_req;
340 struct mmc_command mmc_cmd;
341 struct mmc_data mmc_dat;
342 struct sg_table st;
343 struct scatterlist *sgl;
344 struct mmc_host *host;
345 int ret = 0;
346
347 if (!pktlist->qlen)
348 return -EINVAL;
349
350 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
351 if (brcmf_pm_resume_error(sdiodev))
352 return -EIO;
353
354 /* Single skb use the standard mmc interface */
355 if (pktlist->qlen == 1) {
356 pkt_next = pktlist->next;
357 req_sz = pkt_next->len + 3;
358 req_sz &= (uint)~3;
359
360 if (write)
361 return sdio_memcpy_toio(sdiodev->func[fn], addr,
362 ((u8 *)(pkt_next->data)),
363 req_sz);
364 else if (fn == 1)
365 return sdio_memcpy_fromio(sdiodev->func[fn],
366 ((u8 *)(pkt_next->data)),
367 addr, req_sz);
368 else
369 /* function 2 read is FIFO operation */
370 return sdio_readsb(sdiodev->func[fn],
371 ((u8 *)(pkt_next->data)), addr,
372 req_sz);
373 }
374
375 target_list = pktlist;
376 /* for host with broken sg support, prepare a page aligned list */
377 __skb_queue_head_init(&local_list);
378 if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
379 req_sz = 0;
380 skb_queue_walk(pktlist, pkt_next)
381 req_sz += pkt_next->len;
382 req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
383 while (req_sz > PAGE_SIZE) {
384 pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
385 if (pkt_next == NULL) {
386 ret = -ENOMEM;
387 goto exit;
388 }
389 __skb_queue_tail(&local_list, pkt_next);
390 req_sz -= PAGE_SIZE;
391 }
392 pkt_next = brcmu_pkt_buf_get_skb(req_sz);
393 if (pkt_next == NULL) {
394 ret = -ENOMEM;
395 goto exit;
396 }
397 __skb_queue_tail(&local_list, pkt_next);
398 target_list = &local_list;
399 }
400
401 host = sdiodev->func[fn]->card->host;
402 func_blk_sz = sdiodev->func[fn]->cur_blksize;
403 /* Blocks per command is limited by host count, host transfer
404 * size and the maximum for IO_RW_EXTENDED of 511 blocks.
405 */
406 max_blks = min_t(unsigned int, host->max_blk_count, 511u);
407 max_req_sz = min_t(unsigned int, host->max_req_size,
408 max_blks * func_blk_sz);
409 max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
410 max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
411 seg_sz = target_list->qlen;
412 pkt_offset = 0;
413 pkt_next = target_list->next;
414
415 if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
416 ret = -ENOMEM;
417 goto exit;
418 }
419
420 while (seg_sz) {
421 req_sz = 0;
422 sg_cnt = 0;
423 memset(&mmc_req, 0, sizeof(struct mmc_request));
424 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
425 memset(&mmc_dat, 0, sizeof(struct mmc_data));
426 sgl = st.sgl;
427 /* prep sg table */
428 while (pkt_next != (struct sk_buff *)target_list) {
429 pkt_data = pkt_next->data + pkt_offset;
430 sg_data_sz = pkt_next->len - pkt_offset;
431 if (sg_data_sz > host->max_seg_size)
432 sg_data_sz = host->max_seg_size;
433 if (sg_data_sz > max_req_sz - req_sz)
434 sg_data_sz = max_req_sz - req_sz;
435
436 sg_set_buf(sgl, pkt_data, sg_data_sz);
437
438 sg_cnt++;
439 sgl = sg_next(sgl);
440 req_sz += sg_data_sz;
441 pkt_offset += sg_data_sz;
442 if (pkt_offset == pkt_next->len) {
443 pkt_offset = 0;
444 pkt_next = pkt_next->next;
445 }
446
447 if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
448 break;
449 }
450 seg_sz -= sg_cnt;
451
452 if (req_sz % func_blk_sz != 0) {
453 brcmf_err("sg request length %u is not %u aligned\n",
454 req_sz, func_blk_sz);
455 ret = -ENOTBLK;
456 goto exit;
457 }
458 mmc_dat.sg = st.sgl;
459 mmc_dat.sg_len = sg_cnt;
460 mmc_dat.blksz = func_blk_sz;
461 mmc_dat.blocks = req_sz / func_blk_sz;
462 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
463 mmc_cmd.opcode = SD_IO_RW_EXTENDED;
464 mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
465 mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
466 mmc_cmd.arg |= 1<<27; /* block mode */
467 /* incrementing addr for function 1 */
468 mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
469 mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
470 mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
471 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
472 mmc_req.cmd = &mmc_cmd;
473 mmc_req.data = &mmc_dat;
474 if (fn == 1)
475 addr += req_sz;
476
477 mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
478 mmc_wait_for_req(host, &mmc_req);
479
480 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
481 if (ret != 0) {
482 brcmf_err("CMD53 sg block %s failed %d\n",
483 write ? "write" : "read", ret);
484 ret = -EIO;
485 break;
486 }
487 }
488
489 if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
490 local_pkt_next = local_list.next;
491 orig_offset = 0;
492 skb_queue_walk(pktlist, pkt_next) {
493 dst_offset = 0;
494 do {
495 req_sz = local_pkt_next->len - orig_offset;
496 req_sz = min_t(uint, pkt_next->len - dst_offset,
497 req_sz);
498 orig_data = local_pkt_next->data + orig_offset;
499 dst_data = pkt_next->data + dst_offset;
500 memcpy(dst_data, orig_data, req_sz);
501 orig_offset += req_sz;
502 dst_offset += req_sz;
503 if (orig_offset == local_pkt_next->len) {
504 orig_offset = 0;
505 local_pkt_next = local_pkt_next->next;
506 }
507 if (dst_offset == pkt_next->len)
508 break;
509 } while (!skb_queue_empty(&local_list));
510 }
511 }
512
513 exit:
514 sg_free_table(&st);
515 while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
516 brcmu_pkt_buf_free_skb(pkt_next);
517
518 return ret;
519 }
520
521 int
522 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
523 uint flags, u8 *buf, uint nbytes)
524 {
525 struct sk_buff *mypkt;
526 int err;
527
528 mypkt = brcmu_pkt_buf_get_skb(nbytes);
529 if (!mypkt) {
530 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
531 nbytes);
532 return -EIO;
533 }
534
535 err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
536 if (!err)
537 memcpy(buf, mypkt->data, nbytes);
538
539 brcmu_pkt_buf_free_skb(mypkt);
540 return err;
541 }
542
543 int
544 brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
545 uint flags, struct sk_buff *pkt)
546 {
547 uint width;
548 int err = 0;
549 struct sk_buff_head pkt_list;
550
551 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
552 fn, addr, pkt->len);
553
554 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
555 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
556 if (err)
557 goto done;
558
559 skb_queue_head_init(&pkt_list);
560 skb_queue_tail(&pkt_list, pkt);
561 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
562 skb_dequeue_tail(&pkt_list);
563
564 done:
565 return err;
566 }
567
568 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
569 uint flags, struct sk_buff_head *pktq)
570 {
571 uint incr_fix;
572 uint width;
573 int err = 0;
574
575 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
576 fn, addr, pktq->qlen);
577
578 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
579 err = brcmf_sdio_addrprep(sdiodev, width, &addr);
580 if (err)
581 goto done;
582
583 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
584 err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
585
586 done:
587 return err;
588 }
589
590 int
591 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
592 uint flags, u8 *buf, uint nbytes)
593 {
594 struct sk_buff *mypkt;
595 struct sk_buff_head pktq;
596 int err;
597
598 mypkt = brcmu_pkt_buf_get_skb(nbytes);
599 if (!mypkt) {
600 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
601 nbytes);
602 return -EIO;
603 }
604
605 memcpy(mypkt->data, buf, nbytes);
606 __skb_queue_head_init(&pktq);
607 __skb_queue_tail(&pktq, mypkt);
608 err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
609 __skb_dequeue_tail(&pktq);
610
611 brcmu_pkt_buf_free_skb(mypkt);
612 return err;
613
614 }
615
616 int
617 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
618 uint flags, struct sk_buff_head *pktq)
619 {
620 uint width;
621 int err = 0;
622
623 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
624 fn, addr, pktq->qlen);
625
626 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
627 brcmf_sdio_addrprep(sdiodev, width, &addr);
628
629 err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
630
631 return err;
632 }
633
634 int
635 brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
636 u8 *data, uint size)
637 {
638 int bcmerror = 0;
639 struct sk_buff *pkt;
640 u32 sdaddr;
641 uint dsize;
642 struct sk_buff_head pkt_list;
643
644 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
645 pkt = dev_alloc_skb(dsize);
646 if (!pkt) {
647 brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
648 return -EIO;
649 }
650 pkt->priority = 0;
651 skb_queue_head_init(&pkt_list);
652
653 /* Determine initial transfer parameters */
654 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
655 if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
656 dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
657 else
658 dsize = size;
659
660 sdio_claim_host(sdiodev->func[1]);
661
662 /* Do the transfer(s) */
663 while (size) {
664 /* Set the backplane window to include the start address */
665 bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
666 if (bcmerror)
667 break;
668
669 brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
670 write ? "write" : "read", dsize,
671 sdaddr, address & SBSDIO_SBWINDOW_MASK);
672
673 sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
674 sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
675
676 skb_put(pkt, dsize);
677 if (write)
678 memcpy(pkt->data, data, dsize);
679 skb_queue_tail(&pkt_list, pkt);
680 bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
681 sdaddr, &pkt_list);
682 skb_dequeue_tail(&pkt_list);
683 if (bcmerror) {
684 brcmf_err("membytes transfer failed\n");
685 break;
686 }
687 if (!write)
688 memcpy(data, pkt->data, dsize);
689 skb_trim(pkt, dsize);
690
691 /* Adjust for next transfer (if any) */
692 size -= dsize;
693 if (size) {
694 data += dsize;
695 address += dsize;
696 sdaddr = 0;
697 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
698 }
699 }
700
701 dev_kfree_skb(pkt);
702
703 /* Return the window to backplane enumeration space for core access */
704 if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
705 brcmf_err("FAILED to set window back to 0x%x\n",
706 sdiodev->sbwad);
707
708 sdio_release_host(sdiodev->func[1]);
709
710 return bcmerror;
711 }
712
713 int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
714 {
715 char t_func = (char)fn;
716 brcmf_dbg(SDIO, "Enter\n");
717
718 /* issue abort cmd52 command through F0 */
719 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
720 SDIO_CCCR_ABORT, &t_func);
721
722 brcmf_dbg(SDIO, "Exit\n");
723 return 0;
724 }
725
726 int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
727 {
728 u32 regs = 0;
729 int ret = 0;
730
731 ret = brcmf_sdioh_attach(sdiodev);
732 if (ret)
733 goto out;
734
735 regs = SI_ENUM_BASE;
736
737 /* try to attach to the target device */
738 sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
739 if (!sdiodev->bus) {
740 brcmf_err("device attach failed\n");
741 ret = -ENODEV;
742 goto out;
743 }
744
745 out:
746 if (ret)
747 brcmf_sdio_remove(sdiodev);
748
749 return ret;
750 }
751 EXPORT_SYMBOL(brcmf_sdio_probe);
752
753 int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
754 {
755 sdiodev->bus_if->state = BRCMF_BUS_DOWN;
756
757 if (sdiodev->bus) {
758 brcmf_sdbrcm_disconnect(sdiodev->bus);
759 sdiodev->bus = NULL;
760 }
761
762 brcmf_sdioh_detach(sdiodev);
763
764 sdiodev->sbwad = 0;
765
766 return 0;
767 }
768 EXPORT_SYMBOL(brcmf_sdio_remove);
769
770 void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
771 {
772 if (enable)
773 brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
774 else
775 brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
776 }
This page took 0.047154 seconds and 6 git commands to generate.