drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/printk.h>
21 #include <linux/pci_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/semaphore.h>
29 #include <linux/firmware.h>
30 #include <linux/module.h>
31 #include <linux/bcma/bcma.h>
32 #include <linux/debugfs.h>
33 #include <linux/vmalloc.h>
34 #include <linux/platform_data/brcmfmac-sdio.h>
35 #include <asm/unaligned.h>
36 #include <defs.h>
37 #include <brcmu_wifi.h>
38 #include <brcmu_utils.h>
39 #include <brcm_hw_ids.h>
40 #include <soc.h>
41 #include "sdio_host.h"
42 #include "sdio_chip.h"
43
44 #define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
45
46 #ifdef DEBUG
47
48 #define BRCMF_TRAP_INFO_SIZE 80
49
50 #define CBUF_LEN (128)
51
52 /* Device console log buffer state */
53 #define CONSOLE_BUFFER_MAX 2024
54
55 struct rte_log_le {
56 __le32 buf; /* Can't be pointer on (64-bit) hosts */
57 __le32 buf_size;
58 __le32 idx;
59 char *_buf_compat; /* Redundant pointer for backward compat. */
60 };
61
62 struct rte_console {
63 /* Virtual UART
64 * When there is no UART (e.g. Quickturn),
65 * the host should write a complete
66 * input line directly into cbuf and then write
67 * the length into vcons_in.
68 * This may also be used when there is a real UART
69 * (at risk of conflicting with
70 * the real UART). vcons_out is currently unused.
71 */
72 uint vcons_in;
73 uint vcons_out;
74
75 /* Output (logging) buffer
76 * Console output is written to a ring buffer log_buf at index log_idx.
77 * The host may read the output when it sees log_idx advance.
78 * Output will be lost if the output wraps around faster than the host
79 * polls.
80 */
81 struct rte_log_le log_le;
82
83 /* Console input line buffer
84 * Characters are read one at a time into cbuf
85 * until <CR> is received, then
86 * the buffer is processed as a command line.
87 * Also used for virtual UART.
88 */
89 uint cbuf_idx;
90 char cbuf[CBUF_LEN];
91 };
92
93 #endif /* DEBUG */
94 #include <chipcommon.h>
95
96 #include "dhd_bus.h"
97 #include "dhd_dbg.h"
98 #include "tracepoint.h"
99
100 #define TXQLEN 2048 /* bulk tx queue length */
101 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
102 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
103 #define PRIOMASK 7
104
105 #define TXRETRIES 2 /* # of retries for tx frames */
106
107 #define BRCMF_RXBOUND 50 /* Default for max rx frames in
108 one scheduling pass */
109
110 #define BRCMF_TXBOUND 20 /* Default for max tx frames in
111 one scheduling pass */
112
113 #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
114
115 #define MEMBLOCK 2048 /* Block size used for downloading
116 of dongle image */
117 #define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
118 biggest possible glom */
119
120 #define BRCMF_FIRSTREAD (1 << 6)
121
122
123 /* SBSDIO_DEVICE_CTL */
124
125 /* 1: device will assert busy signal when receiving CMD53 */
126 #define SBSDIO_DEVCTL_SETBUSY 0x01
127 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
128 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
129 /* 1: mask all interrupts to host except the chipActive (rev 8) */
130 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
131 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
132 * sdio bus power cycle to clear (rev 9) */
133 #define SBSDIO_DEVCTL_PADS_ISO 0x08
134 /* Force SD->SB reset mapping (rev 11) */
135 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
136 /* Determined by CoreControl bit */
137 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
138 /* Force backplane reset */
139 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
140 /* Force no backplane reset */
141 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
142
143 /* direct(mapped) cis space */
144
145 /* MAPPED common CIS address */
146 #define SBSDIO_CIS_BASE_COMMON 0x1000
147 /* maximum bytes in one CIS */
148 #define SBSDIO_CIS_SIZE_LIMIT 0x200
149 /* cis offset addr is < 17 bits */
150 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
151
152 /* manfid tuple length, include tuple, link bytes */
153 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
154
155 /* intstatus */
156 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
157 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
158 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
159 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
160 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
161 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
162 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
163 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
164 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
165 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
166 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
167 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
168 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
169 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
170 #define I_PC (1 << 10) /* descriptor error */
171 #define I_PD (1 << 11) /* data error */
172 #define I_DE (1 << 12) /* Descriptor protocol Error */
173 #define I_RU (1 << 13) /* Receive descriptor Underflow */
174 #define I_RO (1 << 14) /* Receive fifo Overflow */
175 #define I_XU (1 << 15) /* Transmit fifo Underflow */
176 #define I_RI (1 << 16) /* Receive Interrupt */
177 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
178 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
179 #define I_XI (1 << 24) /* Transmit Interrupt */
180 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
181 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
182 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
183 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
184 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
185 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
186 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
187 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
188 #define I_DMA (I_RI | I_XI | I_ERRORS)
189
190 /* corecontrol */
191 #define CC_CISRDY (1 << 0) /* CIS Ready */
192 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
193 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
194 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
195 #define CC_XMTDATAAVAIL_MODE (1 << 4)
196 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
197
198 /* SDA_FRAMECTRL */
199 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
200 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
201 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
202 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
203
204 /* HW frame tag */
205 #define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
206
207 /* Total length of frame header for dongle protocol */
208 #define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
209 #define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
210
211 /*
212 * Software allocation of To SB Mailbox resources
213 */
214
215 /* tosbmailbox bits corresponding to intstatus bits */
216 #define SMB_NAK (1 << 0) /* Frame NAK */
217 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
218 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
219 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
220
221 /* tosbmailboxdata */
222 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
223
224 /*
225 * Software allocation of To Host Mailbox resources
226 */
227
228 /* intstatus bits */
229 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
230 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
231 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
232 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
233
234 /* tohostmailboxdata */
235 #define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
236 #define HMB_DATA_DEVREADY 2 /* talk to host after enable */
237 #define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
238 #define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
239
240 #define HMB_DATA_FCDATA_MASK 0xff000000
241 #define HMB_DATA_FCDATA_SHIFT 24
242
243 #define HMB_DATA_VERSION_MASK 0x00ff0000
244 #define HMB_DATA_VERSION_SHIFT 16
245
246 /*
247 * Software-defined protocol header
248 */
249
250 /* Current protocol version */
251 #define SDPCM_PROT_VERSION 4
252
253 /* SW frame header */
254 #define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
255
256 #define SDPCM_CHANNEL_MASK 0x00000f00
257 #define SDPCM_CHANNEL_SHIFT 8
258 #define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
259
260 #define SDPCM_NEXTLEN_OFFSET 2
261
262 /* Data Offset from SOF (HW Tag, SW Tag, Pad) */
263 #define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
264 #define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
265 #define SDPCM_DOFFSET_MASK 0xff000000
266 #define SDPCM_DOFFSET_SHIFT 24
267 #define SDPCM_FCMASK_OFFSET 4 /* Flow control */
268 #define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
269 #define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
270 #define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
271
272 #define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
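/*
 * Overall SDPCM header layout (a summary of what brcmf_sdio_hdparser()
 * below expects): a 4-byte HW frame tag (le16 frame length followed by its
 * le16 bit-wise inverse as a check value), then the 8-byte SW header:
 * byte 0 rx sequence number, byte 1 channel (low nibble, flags in the high
 * nibble), byte 2 next-frame length in 16-byte units, byte 3 data offset,
 * byte 4 flow-control bits, byte 5 tx credit window, bytes 6-7 reserved.
 */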
273
274 /* logical channel numbers */
275 #define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
276 #define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
277 #define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
278 #define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
279 #define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
280
281 #define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
282
283 #define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
284
285 /*
286 * Shared structure between dongle and the host.
287 * The structure contains pointers to trap or assert information.
288 */
289 #define SDPCM_SHARED_VERSION 0x0003
290 #define SDPCM_SHARED_VERSION_MASK 0x00FF
291 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
292 #define SDPCM_SHARED_ASSERT 0x0200
293 #define SDPCM_SHARED_TRAP 0x0400
294
295 /* Space for header read, limit for data packets */
296 #define MAX_HDR_READ (1 << 6)
297 #define MAX_RX_DATASZ 2048
298
299 /* Maximum milliseconds to wait for F2 to come up */
300 #define BRCMF_WAIT_F2RDY 3000
301
302 /* Bump up limit on waiting for HT to account for first startup;
303 * if the image is doing a CRC calculation before programming the PMU
304 * for HT availability, it could take a couple hundred ms more, so
305 * max out at 1 second (1000000us).
306 */
307 #undef PMU_MAX_TRANSITION_DLY
308 #define PMU_MAX_TRANSITION_DLY 1000000
309
310 /* Value for ChipClockCSR during initial setup */
311 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
312 SBSDIO_ALP_AVAIL_REQ)
313
314 /* Flags for SDH calls */
315 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
316
317 #define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
318 #define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
319 MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
320 MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
321
322 #define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
323 #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
324 * when idle
325 */
326 #define BRCMF_IDLE_INTERVAL 1
327
328 #define KSO_WAIT_US 50
329 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
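/* 1,000,000 us / 50 us = 20,000 read-back attempts, i.e. up to ~1 s of
 * KSO polling in brcmf_sdbrcm_kso_control()
 */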
330
331 /*
332 * Conversion of 802.1D priority to precedence level
333 */
334 static uint prio2prec(u32 prio)
335 {
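/* XOR with 2 swaps priorities 0 and 2 so the two lowest 802.1D classes
 * map to the precedence order expected by the tx queue; every other
 * priority maps straight through.
 */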
336 return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
337 (prio^2) : prio;
338 }
339
340 #ifdef DEBUG
341 /* Device console log buffer state */
342 struct brcmf_console {
343 uint count; /* Poll interval msec counter */
344 uint log_addr; /* Log struct address (fixed) */
345 struct rte_log_le log_le; /* Log struct (host copy) */
346 uint bufsize; /* Size of log buffer */
347 u8 *buf; /* Log buffer (host copy) */
348 uint last; /* Last buffer read index */
349 };
350
351 struct brcmf_trap_info {
352 __le32 type;
353 __le32 epc;
354 __le32 cpsr;
355 __le32 spsr;
356 __le32 r0; /* a1 */
357 __le32 r1; /* a2 */
358 __le32 r2; /* a3 */
359 __le32 r3; /* a4 */
360 __le32 r4; /* v1 */
361 __le32 r5; /* v2 */
362 __le32 r6; /* v3 */
363 __le32 r7; /* v4 */
364 __le32 r8; /* v5 */
365 __le32 r9; /* sb/v6 */
366 __le32 r10; /* sl/v7 */
367 __le32 r11; /* fp/v8 */
368 __le32 r12; /* ip */
369 __le32 r13; /* sp */
370 __le32 r14; /* lr */
371 __le32 pc; /* r15 */
372 };
373 #endif /* DEBUG */
374
375 struct sdpcm_shared {
376 u32 flags;
377 u32 trap_addr;
378 u32 assert_exp_addr;
379 u32 assert_file_addr;
380 u32 assert_line;
381 u32 console_addr; /* Address of struct rte_console */
382 u32 msgtrace_addr;
383 u8 tag[32];
384 u32 brpt_addr;
385 };
386
387 struct sdpcm_shared_le {
388 __le32 flags;
389 __le32 trap_addr;
390 __le32 assert_exp_addr;
391 __le32 assert_file_addr;
392 __le32 assert_line;
393 __le32 console_addr; /* Address of struct rte_console */
394 __le32 msgtrace_addr;
395 u8 tag[32];
396 __le32 brpt_addr;
397 };
398
399 /* SDIO read frame info */
400 struct brcmf_sdio_read {
401 u8 seq_num;
402 u8 channel;
403 u16 len;
404 u16 len_left;
405 u16 len_nxtfrm;
406 u8 dat_offset;
407 };
408
409 /* misc chip info needed by some of the routines */
410 /* Private data for SDIO bus interaction */
411 struct brcmf_sdio {
412 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
413 struct chip_info *ci; /* Chip info struct */
414 char *vars; /* Variables (from CIS and/or other) */
415 uint varsz; /* Size of variables buffer */
416
417 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
418
419 u32 hostintmask; /* Copy of Host Interrupt Mask */
420 atomic_t intstatus; /* Intstatus bits (events) pending */
421 atomic_t fcstate; /* State of dongle flow-control */
422
423 uint blocksize; /* Block size of SDIO transfers */
424 uint roundup; /* Max roundup limit */
425
426 struct pktq txq; /* Queue length used for flow-control */
427 u8 flowcontrol; /* per prio flow control bitmask */
428 u8 tx_seq; /* Transmit sequence number (next) */
429 u8 tx_max; /* Maximum transmit sequence allowed */
430
431 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
432 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
433 u8 rx_seq; /* Receive sequence number (expected) */
434 struct brcmf_sdio_read cur_read;
435 /* info of current read frame */
436 bool rxskip; /* Skip receive (awaiting NAK ACK) */
437 bool rxpending; /* Data frame pending in dongle */
438
439 uint rxbound; /* Rx frames to read before resched */
440 uint txbound; /* Tx frames to send before resched */
441 uint txminmax;
442
443 struct sk_buff *glomd; /* Packet containing glomming descriptor */
444 struct sk_buff_head glom; /* Packet list for glommed superframe */
445 uint glomerr; /* Glom packet read errors */
446
447 u8 *rxbuf; /* Buffer for receiving control packets */
448 uint rxblen; /* Allocated length of rxbuf */
449 u8 *rxctl; /* Aligned pointer into rxbuf */
450 u8 *rxctl_orig; /* pointer for freeing rxctl */
451 uint rxlen; /* Length of valid data in buffer */
452 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
453
454 u8 sdpcm_ver; /* Bus protocol reported by dongle */
455
456 bool intr; /* Use interrupts */
457 bool poll; /* Use polling */
458 atomic_t ipend; /* Device interrupt is pending */
459 uint spurious; /* Count of spurious interrupts */
460 uint pollrate; /* Ticks between device polls */
461 uint polltick; /* Tick counter */
462
463 #ifdef DEBUG
464 uint console_interval;
465 struct brcmf_console console; /* Console output polling support */
466 uint console_addr; /* Console address from shared struct */
467 #endif /* DEBUG */
468
469 uint clkstate; /* State of sd and backplane clock(s) */
470 bool activity; /* Activity flag for clock down */
471 s32 idletime; /* Control for activity timeout */
472 s32 idlecount; /* Activity timeout counter */
473 s32 idleclock; /* How to set bus driver when idle */
474 bool rxflow_mode; /* Rx flow control mode */
475 bool rxflow; /* Is rx flow control on */
476 bool alp_only; /* Don't use HT clock (ALP only) */
477
478 u8 *ctrl_frame_buf;
479 u32 ctrl_frame_len;
480 bool ctrl_frame_stat;
481
482 spinlock_t txqlock;
483 wait_queue_head_t ctrl_wait;
484 wait_queue_head_t dcmd_resp_wait;
485
486 struct timer_list timer;
487 struct completion watchdog_wait;
488 struct task_struct *watchdog_tsk;
489 bool wd_timer_valid;
490 uint save_ms;
491
492 struct workqueue_struct *brcmf_wq;
493 struct work_struct datawork;
494 atomic_t dpc_tskcnt;
495
496 const struct firmware *firmware;
497 u32 fw_ptr;
498
499 bool txoff; /* Transmit flow-controlled */
500 struct brcmf_sdio_count sdcnt;
501 bool sr_enabled; /* SaveRestore enabled */
502 bool sleeping; /* SDIO bus sleeping */
503 };
504
505 /* clkstate */
506 #define CLK_NONE 0
507 #define CLK_SDONLY 1
508 #define CLK_PENDING 2
509 #define CLK_AVAIL 3
510
511 #ifdef DEBUG
512 static int qcount[NUMPRIO];
513 static int tx_packets[NUMPRIO];
514 #endif /* DEBUG */
515
516 #define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
517
518 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
519
520 /* Retry count for register access failures */
521 static const uint retry_limit = 2;
522
523 /* Limit on rounding up frames */
524 static const uint max_roundup = 512;
525
526 #define ALIGNMENT 4
527
528 enum brcmf_sdio_frmtype {
529 BRCMF_SDIO_FT_NORMAL,
530 BRCMF_SDIO_FT_SUPER,
531 BRCMF_SDIO_FT_SUB,
532 };
533
534 static void pkt_align(struct sk_buff *p, int len, int align)
535 {
536 uint datalign;
537 datalign = (unsigned long)(p->data);
538 datalign = roundup(datalign, (align)) - datalign;
539 if (datalign)
540 skb_pull(p, datalign);
541 __skb_trim(p, len);
542 }
543
544 /* To check if there's window offered */
545 static bool data_ok(struct brcmf_sdio *bus)
546 {
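/* tx_max is the highest sequence number the dongle currently allows;
 * the modulo-256 (u8) difference from tx_seq must be non-zero and have
 * the top bit clear for a transmit window to be open.
 */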
547 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
548 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
549 }
550
551 /*
552 * Reads a register in the SDIO hardware block. This block occupies a series of
553 * addresses on the 32-bit backplane bus.
554 */
555 static int
556 r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
557 {
558 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
559 int ret;
560
561 *regvar = brcmf_sdio_regrl(bus->sdiodev,
562 bus->ci->c_inf[idx].base + offset, &ret);
563
564 return ret;
565 }
566
567 static int
568 w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
569 {
570 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
571 int ret;
572
573 brcmf_sdio_regwl(bus->sdiodev,
574 bus->ci->c_inf[idx].base + reg_offset,
575 regval, &ret);
576
577 return ret;
578 }
579
580 static int
581 brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
582 {
583 u8 wr_val = 0, rd_val, cmp_val, bmask;
584 int err = 0;
585 int try_cnt = 0;
586
587 brcmf_dbg(TRACE, "Enter\n");
588
589 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
590 /* 1st KSO write goes to AOS wake up core if device is asleep */
591 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
592 wr_val, &err);
593 if (err) {
594 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
595 return err;
596 }
597
598 if (on) {
599 /* device WAKEUP through KSO:
600 * write bit 0 & read back until
601 * both bits 0 (kso bit) & 1 (dev on status) are set
602 */
603 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
604 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
605 bmask = cmp_val;
606 usleep_range(2000, 3000);
607 } else {
608 /* Put device to sleep, turn off KSO */
609 cmp_val = 0;
610 /* only check for bit0, bit1(dev on status) may not
611 * get cleared right away
612 */
613 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
614 }
615
616 do {
617 /* reliable KSO bit set/clr:
618 * the sdiod sleep write access is synced to the PMU 32 kHz clock,
619 * so a single write attempt may fail;
620 * read back until the value matches what was written
621 */
622 rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
623 &err);
624 if (((rd_val & bmask) == cmp_val) && !err)
625 break;
626 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
627 try_cnt, MAX_KSO_ATTEMPTS, err);
628 udelay(KSO_WAIT_US);
629 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
630 wr_val, &err);
631 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
632
633 return err;
634 }
635
636 #define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
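/* Note: PKT_AVAILABLE() expands against a local 'intstatus' variable in
 * the function where it is used.
 */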
637
638 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
639
640 /* Turn backplane clock on or off */
641 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
642 {
643 int err;
644 u8 clkctl, clkreq, devctl;
645 unsigned long timeout;
646
647 brcmf_dbg(SDIO, "Enter\n");
648
649 clkctl = 0;
650
651 if (bus->sr_enabled) {
652 bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
653 return 0;
654 }
655
656 if (on) {
657 /* Request HT Avail */
658 clkreq =
659 bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
660
661 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
662 clkreq, &err);
663 if (err) {
664 brcmf_err("HT Avail request error: %d\n", err);
665 return -EBADE;
666 }
667
668 /* Check current status */
669 clkctl = brcmf_sdio_regrb(bus->sdiodev,
670 SBSDIO_FUNC1_CHIPCLKCSR, &err);
671 if (err) {
672 brcmf_err("HT Avail read error: %d\n", err);
673 return -EBADE;
674 }
675
676 /* Go to pending and await interrupt if appropriate */
677 if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
678 /* Allow only clock-available interrupt */
679 devctl = brcmf_sdio_regrb(bus->sdiodev,
680 SBSDIO_DEVICE_CTL, &err);
681 if (err) {
682 brcmf_err("Devctl error setting CA: %d\n",
683 err);
684 return -EBADE;
685 }
686
687 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
688 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
689 devctl, &err);
690 brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
691 bus->clkstate = CLK_PENDING;
692
693 return 0;
694 } else if (bus->clkstate == CLK_PENDING) {
695 /* Cancel CA-only interrupt filter */
696 devctl = brcmf_sdio_regrb(bus->sdiodev,
697 SBSDIO_DEVICE_CTL, &err);
698 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
699 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
700 devctl, &err);
701 }
702
703 /* Otherwise, wait here (polling) for HT Avail */
704 timeout = jiffies +
705 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
706 while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
707 clkctl = brcmf_sdio_regrb(bus->sdiodev,
708 SBSDIO_FUNC1_CHIPCLKCSR,
709 &err);
710 if (time_after(jiffies, timeout))
711 break;
712 else
713 usleep_range(5000, 10000);
714 }
715 if (err) {
716 brcmf_err("HT Avail request error: %d\n", err);
717 return -EBADE;
718 }
719 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
720 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
721 PMU_MAX_TRANSITION_DLY, clkctl);
722 return -EBADE;
723 }
724
725 /* Mark clock available */
726 bus->clkstate = CLK_AVAIL;
727 brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
728
729 #if defined(DEBUG)
730 if (!bus->alp_only) {
731 if (SBSDIO_ALPONLY(clkctl))
732 brcmf_err("HT Clock should be on\n");
733 }
734 #endif /* defined (DEBUG) */
735
736 bus->activity = true;
737 } else {
738 clkreq = 0;
739
740 if (bus->clkstate == CLK_PENDING) {
741 /* Cancel CA-only interrupt filter */
742 devctl = brcmf_sdio_regrb(bus->sdiodev,
743 SBSDIO_DEVICE_CTL, &err);
744 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
745 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
746 devctl, &err);
747 }
748
749 bus->clkstate = CLK_SDONLY;
750 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
751 clkreq, &err);
752 brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
753 if (err) {
754 brcmf_err("Failed access turning clock off: %d\n",
755 err);
756 return -EBADE;
757 }
758 }
759 return 0;
760 }
761
762 /* Change idle/active SD state */
763 static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
764 {
765 brcmf_dbg(SDIO, "Enter\n");
766
767 if (on)
768 bus->clkstate = CLK_SDONLY;
769 else
770 bus->clkstate = CLK_NONE;
771
772 return 0;
773 }
774
775 /* Transition SD and backplane clock readiness */
776 static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
777 {
778 #ifdef DEBUG
779 uint oldstate = bus->clkstate;
780 #endif /* DEBUG */
781
782 brcmf_dbg(SDIO, "Enter\n");
783
784 /* Early exit if we're already there */
785 if (bus->clkstate == target) {
786 if (target == CLK_AVAIL) {
787 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
788 bus->activity = true;
789 }
790 return 0;
791 }
792
793 switch (target) {
794 case CLK_AVAIL:
795 /* Make sure SD clock is available */
796 if (bus->clkstate == CLK_NONE)
797 brcmf_sdbrcm_sdclk(bus, true);
798 /* Now request HT Avail on the backplane */
799 brcmf_sdbrcm_htclk(bus, true, pendok);
800 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
801 bus->activity = true;
802 break;
803
804 case CLK_SDONLY:
805 /* Remove HT request, or bring up SD clock */
806 if (bus->clkstate == CLK_NONE)
807 brcmf_sdbrcm_sdclk(bus, true);
808 else if (bus->clkstate == CLK_AVAIL)
809 brcmf_sdbrcm_htclk(bus, false, false);
810 else
811 brcmf_err("request for %d -> %d\n",
812 bus->clkstate, target);
813 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
814 break;
815
816 case CLK_NONE:
817 /* Make sure to remove HT request */
818 if (bus->clkstate == CLK_AVAIL)
819 brcmf_sdbrcm_htclk(bus, false, false);
820 /* Now remove the SD clock */
821 brcmf_sdbrcm_sdclk(bus, false);
822 brcmf_sdbrcm_wd_timer(bus, 0);
823 break;
824 }
825 #ifdef DEBUG
826 brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
827 #endif /* DEBUG */
828
829 return 0;
830 }
831
832 static int
833 brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
834 {
835 int err = 0;
836 brcmf_dbg(TRACE, "Enter\n");
837 brcmf_dbg(SDIO, "request %s currently %s\n",
838 (sleep ? "SLEEP" : "WAKE"),
839 (bus->sleeping ? "SLEEP" : "WAKE"));
840
841 /* If SR is enabled control bus state with KSO */
842 if (bus->sr_enabled) {
843 /* Done if we're already in the requested state */
844 if (sleep == bus->sleeping)
845 goto end;
846
847 /* Going to sleep */
848 if (sleep) {
849 /* Don't sleep if something is pending */
850 if (atomic_read(&bus->intstatus) ||
851 atomic_read(&bus->ipend) > 0 ||
852 (!atomic_read(&bus->fcstate) &&
853 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
854 data_ok(bus)))
855 return -EBUSY;
856 err = brcmf_sdbrcm_kso_control(bus, false);
857 /* disable watchdog */
858 if (!err)
859 brcmf_sdbrcm_wd_timer(bus, 0);
860 } else {
861 bus->idlecount = 0;
862 err = brcmf_sdbrcm_kso_control(bus, true);
863 }
864 if (!err) {
865 /* Change state */
866 bus->sleeping = sleep;
867 brcmf_dbg(SDIO, "new state %s\n",
868 (sleep ? "SLEEP" : "WAKE"));
869 } else {
870 brcmf_err("error while changing bus sleep state %d\n",
871 err);
872 return err;
873 }
874 }
875
876 end:
877 /* control clocks */
878 if (sleep) {
879 if (!bus->sr_enabled)
880 brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
881 } else {
882 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
883 }
884
885 return err;
886
887 }
888
889 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
890 {
891 u32 intstatus = 0;
892 u32 hmb_data;
893 u8 fcbits;
894 int ret;
895
896 brcmf_dbg(SDIO, "Enter\n");
897
898 /* Read mailbox data and ack that we did so */
899 ret = r_sdreg32(bus, &hmb_data,
900 offsetof(struct sdpcmd_regs, tohostmailboxdata));
901
902 if (ret == 0)
903 w_sdreg32(bus, SMB_INT_ACK,
904 offsetof(struct sdpcmd_regs, tosbmailbox));
905 bus->sdcnt.f1regdata += 2;
906
907 /* Dongle recomposed rx frames, accept them again */
908 if (hmb_data & HMB_DATA_NAKHANDLED) {
909 brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
910 bus->rx_seq);
911 if (!bus->rxskip)
912 brcmf_err("unexpected NAKHANDLED!\n");
913
914 bus->rxskip = false;
915 intstatus |= I_HMB_FRAME_IND;
916 }
917
918 /*
919 * DEVREADY does not occur with gSPI.
920 */
921 if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
922 bus->sdpcm_ver =
923 (hmb_data & HMB_DATA_VERSION_MASK) >>
924 HMB_DATA_VERSION_SHIFT;
925 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
926 brcmf_err("Version mismatch, dongle reports %d, "
927 "expecting %d\n",
928 bus->sdpcm_ver, SDPCM_PROT_VERSION);
929 else
930 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
931 bus->sdpcm_ver);
932 }
933
934 /*
935 * Flow Control has been moved into the RX headers and this out of band
936 * method isn't used any more.
937 * Handling it here keeps us backward compatible with older dongles.
938 */
939 if (hmb_data & HMB_DATA_FC) {
940 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
941 HMB_DATA_FCDATA_SHIFT;
942
943 if (fcbits & ~bus->flowcontrol)
944 bus->sdcnt.fc_xoff++;
945
946 if (bus->flowcontrol & ~fcbits)
947 bus->sdcnt.fc_xon++;
948
949 bus->sdcnt.fc_rcvd++;
950 bus->flowcontrol = fcbits;
951 }
952
953 /* Shouldn't be any others */
954 if (hmb_data & ~(HMB_DATA_DEVREADY |
955 HMB_DATA_NAKHANDLED |
956 HMB_DATA_FC |
957 HMB_DATA_FWREADY |
958 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
959 brcmf_err("Unknown mailbox data content: 0x%02x\n",
960 hmb_data);
961
962 return intstatus;
963 }
964
965 static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
966 {
967 uint retries = 0;
968 u16 lastrbc;
969 u8 hi, lo;
970 int err;
971
972 brcmf_err("%sterminate frame%s\n",
973 abort ? "abort command, " : "",
974 rtx ? ", send NAK" : "");
975
976 if (abort)
977 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
978
979 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
980 SFC_RF_TERM, &err);
981 bus->sdcnt.f1regdata++;
982
983 /* Wait until the packet has been flushed (device/FIFO stable) */
984 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
985 hi = brcmf_sdio_regrb(bus->sdiodev,
986 SBSDIO_FUNC1_RFRAMEBCHI, &err);
987 lo = brcmf_sdio_regrb(bus->sdiodev,
988 SBSDIO_FUNC1_RFRAMEBCLO, &err);
989 bus->sdcnt.f1regdata += 2;
990
991 if ((hi == 0) && (lo == 0))
992 break;
993
994 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
995 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
996 lastrbc, (hi << 8) + lo);
997 }
998 lastrbc = (hi << 8) + lo;
999 }
1000
1001 if (!retries)
1002 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1003 else
1004 brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1005
1006 if (rtx) {
1007 bus->sdcnt.rxrtx++;
1008 err = w_sdreg32(bus, SMB_NAK,
1009 offsetof(struct sdpcmd_regs, tosbmailbox));
1010
1011 bus->sdcnt.f1regdata++;
1012 if (err == 0)
1013 bus->rxskip = true;
1014 }
1015
1016 /* Clear partial in any case */
1017 bus->cur_read.len = 0;
1018
1019 /* If we can't reach the device, signal failure */
1020 if (err)
1021 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1022 }
1023
1024 /* return total length of buffer chain */
1025 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1026 {
1027 struct sk_buff *p;
1028 uint total;
1029
1030 total = 0;
1031 skb_queue_walk(&bus->glom, p)
1032 total += p->len;
1033 return total;
1034 }
1035
1036 static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1037 {
1038 struct sk_buff *cur, *next;
1039
1040 skb_queue_walk_safe(&bus->glom, cur, next) {
1041 skb_unlink(cur, &bus->glom);
1042 brcmu_pkt_buf_free_skb(cur);
1043 }
1044 }
1045
1046 static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1047 struct brcmf_sdio_read *rd,
1048 enum brcmf_sdio_frmtype type)
1049 {
1050 u16 len, checksum;
1051 u8 rx_seq, fc, tx_seq_max;
1052
1053 /*
1054 * 4 bytes hardware header (frame tag)
1055 * Byte 0~1: Frame length
1056 * Byte 2~3: Checksum, bit-wise inverse of frame length
1057 */
1058 len = get_unaligned_le16(header);
1059 checksum = get_unaligned_le16(header + sizeof(u16));
1060 /* All zero means no more to read */
1061 if (!(len | checksum)) {
1062 bus->rxpending = false;
1063 return -ENODATA;
1064 }
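/* The check value is the bit-wise inverse of the length, so
 * len ^ checksum must be all ones; anything else means a corrupt tag.
 */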
1065 if ((u16)(~(len ^ checksum))) {
1066 brcmf_err("HW header checksum error\n");
1067 bus->sdcnt.rx_badhdr++;
1068 brcmf_sdbrcm_rxfail(bus, false, false);
1069 return -EIO;
1070 }
1071 if (len < SDPCM_HDRLEN) {
1072 brcmf_err("HW header length error\n");
1073 return -EPROTO;
1074 }
1075 if (type == BRCMF_SDIO_FT_SUPER &&
1076 (roundup(len, bus->blocksize) != rd->len)) {
1077 brcmf_err("HW superframe header length error\n");
1078 return -EPROTO;
1079 }
1080 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1081 brcmf_err("HW subframe header length error\n");
1082 return -EPROTO;
1083 }
1084 rd->len = len;
1085
1086 /*
1087 * 8 bytes software header
1088 * Byte 0: Rx sequence number
1089 * Byte 1: 4 LSB channel number, 4 MSB flags
1090 * Byte 2: Length of next data frame
1091 * Byte 3: Data offset
1092 * Byte 4: Flow control bits
1093 * Byte 5: Maximum Sequence number allowed for Tx
1094 * Byte 6~7: Reserved
1095 */
1096 if (type == BRCMF_SDIO_FT_SUPER &&
1097 SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
1098 brcmf_err("Glom descriptor found in superframe head\n");
1099 rd->len = 0;
1100 return -EINVAL;
1101 }
1102 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1103 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1104 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1105 type != BRCMF_SDIO_FT_SUPER) {
1106 brcmf_err("HW header length too long\n");
1107 bus->sdcnt.rx_toolong++;
1108 brcmf_sdbrcm_rxfail(bus, false, false);
1109 rd->len = 0;
1110 return -EPROTO;
1111 }
1112 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1113 brcmf_err("Wrong channel for superframe\n");
1114 rd->len = 0;
1115 return -EINVAL;
1116 }
1117 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1118 rd->channel != SDPCM_EVENT_CHANNEL) {
1119 brcmf_err("Wrong channel for subframe\n");
1120 rd->len = 0;
1121 return -EINVAL;
1122 }
1123 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1124 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1125 brcmf_err("seq %d: bad data offset\n", rx_seq);
1126 bus->sdcnt.rx_badhdr++;
1127 brcmf_sdbrcm_rxfail(bus, false, false);
1128 rd->len = 0;
1129 return -ENXIO;
1130 }
1131 if (rd->seq_num != rx_seq) {
1132 brcmf_err("seq %d: sequence number error, expect %d\n",
1133 rx_seq, rd->seq_num);
1134 bus->sdcnt.rx_badseq++;
1135 rd->seq_num = rx_seq;
1136 }
1137 /* no need to check the rest of the header for a subframe */
1138 if (type == BRCMF_SDIO_FT_SUB)
1139 return 0;
1140 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
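/* the next-frame length field is encoded in 16-byte units, hence << 4 */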
1141 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1142 /* only warn for non-glom packets */
1143 if (rd->channel != SDPCM_GLOM_CHANNEL)
1144 brcmf_err("seq %d: next length error\n", rx_seq);
1145 rd->len_nxtfrm = 0;
1146 }
1147 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1148 if (bus->flowcontrol != fc) {
1149 if (~bus->flowcontrol & fc)
1150 bus->sdcnt.fc_xoff++;
1151 if (bus->flowcontrol & ~fc)
1152 bus->sdcnt.fc_xon++;
1153 bus->sdcnt.fc_rcvd++;
1154 bus->flowcontrol = fc;
1155 }
1156 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
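/* sanity-check the credit window: a jump of more than 0x40 frames ahead
 * of tx_seq is treated as a header error and the window is clamped to
 * tx_seq + 2
 */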
1157 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1158 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1159 tx_seq_max = bus->tx_seq + 2;
1160 }
1161 bus->tx_max = tx_seq_max;
1162
1163 return 0;
1164 }
1165
1166 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1167 {
1168 u16 dlen, totlen;
1169 u8 *dptr, num = 0;
1170
1171 u16 sublen;
1172 struct sk_buff *pfirst, *pnext;
1173
1174 int errcode;
1175 u8 doff, sfdoff;
1176
1177 struct brcmf_sdio_read rd_new;
1178
1179 /* If packets, issue read(s) and send up packet chain */
1180 /* Return sequence numbers consumed? */
1181
1182 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1183 bus->glomd, skb_peek(&bus->glom));
1184
1185 /* If there's a descriptor, generate the packet chain */
1186 if (bus->glomd) {
1187 pfirst = pnext = NULL;
1188 dlen = (u16) (bus->glomd->len);
1189 dptr = bus->glomd->data;
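/* the glom descriptor is a list of little-endian u16 subframe lengths,
 * so its total length must be even
 */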
1190 if (!dlen || (dlen & 1)) {
1191 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1192 dlen);
1193 dlen = 0;
1194 }
1195
1196 for (totlen = num = 0; dlen; num++) {
1197 /* Get (and move past) next length */
1198 sublen = get_unaligned_le16(dptr);
1199 dlen -= sizeof(u16);
1200 dptr += sizeof(u16);
1201 if ((sublen < SDPCM_HDRLEN) ||
1202 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1203 brcmf_err("descriptor len %d bad: %d\n",
1204 num, sublen);
1205 pnext = NULL;
1206 break;
1207 }
1208 if (sublen % BRCMF_SDALIGN) {
1209 brcmf_err("sublen %d not multiple of %d\n",
1210 sublen, BRCMF_SDALIGN);
1211 }
1212 totlen += sublen;
1213
1214 /* For last frame, adjust read len so total
1215 is a block multiple */
1216 if (!dlen) {
1217 sublen +=
1218 (roundup(totlen, bus->blocksize) - totlen);
1219 totlen = roundup(totlen, bus->blocksize);
1220 }
1221
1222 /* Allocate/chain packet for next subframe */
1223 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
1224 if (pnext == NULL) {
1225 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1226 num, sublen);
1227 break;
1228 }
1229 skb_queue_tail(&bus->glom, pnext);
1230
1231 /* Adhere to start alignment requirements */
1232 pkt_align(pnext, sublen, BRCMF_SDALIGN);
1233 }
1234
1235 /* If all allocations succeeded, save packet chain
1236 in bus structure */
1237 if (pnext) {
1238 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1239 totlen, num);
1240 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1241 totlen != bus->cur_read.len) {
1242 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1243 bus->cur_read.len, totlen, rxseq);
1244 }
1245 pfirst = pnext = NULL;
1246 } else {
1247 brcmf_sdbrcm_free_glom(bus);
1248 num = 0;
1249 }
1250
1251 /* Done with descriptor packet */
1252 brcmu_pkt_buf_free_skb(bus->glomd);
1253 bus->glomd = NULL;
1254 bus->cur_read.len = 0;
1255 }
1256
1257 /* Ok -- either we just generated a packet chain,
1258 or had one from before */
1259 if (!skb_queue_empty(&bus->glom)) {
1260 if (BRCMF_GLOM_ON()) {
1261 brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1262 skb_queue_walk(&bus->glom, pnext) {
1263 brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
1264 pnext, (u8 *) (pnext->data),
1265 pnext->len, pnext->len);
1266 }
1267 }
1268
1269 pfirst = skb_peek(&bus->glom);
1270 dlen = (u16) brcmf_sdbrcm_glom_len(bus);
1271
1272 /* Do an SDIO read for the superframe. Configurable iovar to
1273 * read directly into the chained packet, or allocate a large
1274 * packet and copy into the chain.
1275 */
1276 sdio_claim_host(bus->sdiodev->func[1]);
1277 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1278 bus->sdiodev->sbwad,
1279 SDIO_FUNC_2, F2SYNC, &bus->glom);
1280 sdio_release_host(bus->sdiodev->func[1]);
1281 bus->sdcnt.f2rxdata++;
1282
1283 /* On failure, kill the superframe, allow a couple retries */
1284 if (errcode < 0) {
1285 brcmf_err("glom read of %d bytes failed: %d\n",
1286 dlen, errcode);
1287
1288 sdio_claim_host(bus->sdiodev->func[1]);
1289 if (bus->glomerr++ < 3) {
1290 brcmf_sdbrcm_rxfail(bus, true, true);
1291 } else {
1292 bus->glomerr = 0;
1293 brcmf_sdbrcm_rxfail(bus, true, false);
1294 bus->sdcnt.rxglomfail++;
1295 brcmf_sdbrcm_free_glom(bus);
1296 }
1297 sdio_release_host(bus->sdiodev->func[1]);
1298 return 0;
1299 }
1300
1301 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1302 pfirst->data, min_t(int, pfirst->len, 48),
1303 "SUPERFRAME:\n");
1304
1305 rd_new.seq_num = rxseq;
1306 rd_new.len = dlen;
1307 sdio_claim_host(bus->sdiodev->func[1]);
1308 errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
1309 BRCMF_SDIO_FT_SUPER);
1310 sdio_release_host(bus->sdiodev->func[1]);
1311 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1312
1313 /* Remove superframe header, remember offset */
1314 skb_pull(pfirst, rd_new.dat_offset);
1315 sfdoff = rd_new.dat_offset;
1316 num = 0;
1317
1318 /* Validate all the subframe headers */
1319 skb_queue_walk(&bus->glom, pnext) {
1320 /* leave when invalid subframe is found */
1321 if (errcode)
1322 break;
1323
1324 rd_new.len = pnext->len;
1325 rd_new.seq_num = rxseq++;
1326 sdio_claim_host(bus->sdiodev->func[1]);
1327 errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
1328 BRCMF_SDIO_FT_SUB);
1329 sdio_release_host(bus->sdiodev->func[1]);
1330 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1331 pnext->data, 32, "subframe:\n");
1332
1333 num++;
1334 }
1335
1336 if (errcode) {
1337 /* Terminate frame on error, request
1338 a couple retries */
1339 sdio_claim_host(bus->sdiodev->func[1]);
1340 if (bus->glomerr++ < 3) {
1341 /* Restore superframe header space */
1342 skb_push(pfirst, sfdoff);
1343 brcmf_sdbrcm_rxfail(bus, true, true);
1344 } else {
1345 bus->glomerr = 0;
1346 brcmf_sdbrcm_rxfail(bus, true, false);
1347 bus->sdcnt.rxglomfail++;
1348 brcmf_sdbrcm_free_glom(bus);
1349 }
1350 sdio_release_host(bus->sdiodev->func[1]);
1351 bus->cur_read.len = 0;
1352 return 0;
1353 }
1354
1355 /* Basic SD framing looks ok - process each packet (header) */
1356
1357 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1358 dptr = (u8 *) (pfirst->data);
1359 sublen = get_unaligned_le16(dptr);
1360 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1361
1362 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1363 dptr, pfirst->len,
1364 "Rx Subframe Data:\n");
1365
1366 __skb_trim(pfirst, sublen);
1367 skb_pull(pfirst, doff);
1368
1369 if (pfirst->len == 0) {
1370 skb_unlink(pfirst, &bus->glom);
1371 brcmu_pkt_buf_free_skb(pfirst);
1372 continue;
1373 }
1374
1375 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1376 pfirst->data,
1377 min_t(int, pfirst->len, 32),
1378 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1379 bus->glom.qlen, pfirst, pfirst->data,
1380 pfirst->len, pfirst->next,
1381 pfirst->prev);
1382 }
1383 /* send any remaining packets up */
1384 if (bus->glom.qlen)
1385 brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
1386
1387 bus->sdcnt.rxglomframes++;
1388 bus->sdcnt.rxglompkts += bus->glom.qlen;
1389 }
1390 return num;
1391 }
1392
1393 static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1394 bool *pending)
1395 {
1396 DECLARE_WAITQUEUE(wait, current);
1397 int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1398
1399 /* Wait until control frame is available */
1400 add_wait_queue(&bus->dcmd_resp_wait, &wait);
1401 set_current_state(TASK_INTERRUPTIBLE);
1402
1403 while (!(*condition) && (!signal_pending(current) && timeout))
1404 timeout = schedule_timeout(timeout);
1405
1406 if (signal_pending(current))
1407 *pending = true;
1408
1409 set_current_state(TASK_RUNNING);
1410 remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1411
1412 return timeout;
1413 }
1414
1415 static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1416 {
1417 if (waitqueue_active(&bus->dcmd_resp_wait))
1418 wake_up_interruptible(&bus->dcmd_resp_wait);
1419
1420 return 0;
1421 }
1422 static void
1423 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1424 {
1425 uint rdlen, pad;
1426 u8 *buf = NULL, *rbuf;
1427 int sdret;
1428
1429 brcmf_dbg(TRACE, "Enter\n");
1430
1431 if (bus->rxblen)
1432 buf = vzalloc(bus->rxblen);
1433 if (!buf)
1434 goto done;
1435
1436 rbuf = bus->rxbuf;
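/* advance rbuf to the next BRCMF_SDALIGN boundary so the remainder of
 * the frame is read into an aligned buffer
 */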
1437 pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
1438 if (pad)
1439 rbuf += (BRCMF_SDALIGN - pad);
1440
1441 /* Copy the already-read portion over */
1442 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1443 if (len <= BRCMF_FIRSTREAD)
1444 goto gotpkt;
1445
1446 /* Raise rdlen to next SDIO block to avoid tail command */
1447 rdlen = len - BRCMF_FIRSTREAD;
1448 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1449 pad = bus->blocksize - (rdlen % bus->blocksize);
1450 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1451 ((len + pad) < bus->sdiodev->bus_if->maxctl))
1452 rdlen += pad;
1453 } else if (rdlen % BRCMF_SDALIGN) {
1454 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1455 }
1456
1457 /* Satisfy length-alignment requirements */
1458 if (rdlen & (ALIGNMENT - 1))
1459 rdlen = roundup(rdlen, ALIGNMENT);
1460
1461 /* Drop if the read is too big or it exceeds our maximum */
1462 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1463 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1464 rdlen, bus->sdiodev->bus_if->maxctl);
1465 brcmf_sdbrcm_rxfail(bus, false, false);
1466 goto done;
1467 }
1468
1469 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1470 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1471 len, len - doff, bus->sdiodev->bus_if->maxctl);
1472 bus->sdcnt.rx_toolong++;
1473 brcmf_sdbrcm_rxfail(bus, false, false);
1474 goto done;
1475 }
1476
1477 /* Read the remainder of the frame body */
1478 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1479 bus->sdiodev->sbwad,
1480 SDIO_FUNC_2,
1481 F2SYNC, rbuf, rdlen);
1482 bus->sdcnt.f2rxdata++;
1483
1484 /* Control frame failures need retransmission */
1485 if (sdret < 0) {
1486 brcmf_err("read %d control bytes failed: %d\n",
1487 rdlen, sdret);
1488 bus->sdcnt.rxc_errors++;
1489 brcmf_sdbrcm_rxfail(bus, true, true);
1490 goto done;
1491 } else
1492 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1493
1494 gotpkt:
1495
1496 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1497 buf, len, "RxCtrl:\n");
1498
1499 /* Point to valid data and indicate its length */
1500 spin_lock_bh(&bus->rxctl_lock);
1501 if (bus->rxctl) {
1502 brcmf_err("last control frame is being processed.\n");
1503 spin_unlock_bh(&bus->rxctl_lock);
1504 vfree(buf);
1505 goto done;
1506 }
1507 bus->rxctl = buf + doff;
1508 bus->rxctl_orig = buf;
1509 bus->rxlen = len - doff;
1510 spin_unlock_bh(&bus->rxctl_lock);
1511
1512 done:
1513 /* Wake any waiters */
1514 brcmf_sdbrcm_dcmd_resp_wake(bus);
1515 }
1516
1517 /* Pad read to blocksize for efficiency */
1518 static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1519 {
1520 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1521 *pad = bus->blocksize - (*rdlen % bus->blocksize);
1522 if (*pad <= bus->roundup && *pad < bus->blocksize &&
1523 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1524 *rdlen += *pad;
1525 } else if (*rdlen % BRCMF_SDALIGN) {
1526 *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
1527 }
1528 }
1529
1530 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1531 {
1532 struct sk_buff *pkt; /* Packet for event or data frames */
1533 struct sk_buff_head pktlist; /* needed for bus interface */
1534 u16 pad; /* Number of pad bytes to read */
1535 uint rxleft = 0; /* Remaining number of frames allowed */
1536 int ret; /* Return code from calls */
1537 uint rxcount = 0; /* Total frames read */
1538 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1539 u8 head_read = 0;
1540
1541 brcmf_dbg(TRACE, "Enter\n");
1542
1543 /* Not finished unless we encounter no more frames indication */
1544 bus->rxpending = true;
1545
1546 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1547 !bus->rxskip && rxleft &&
1548 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1549 rd->seq_num++, rxleft--) {
1550
1551 /* Handle glomming separately */
1552 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1553 u8 cnt;
1554 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1555 bus->glomd, skb_peek(&bus->glom));
1556 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1557 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1558 rd->seq_num += cnt - 1;
1559 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1560 continue;
1561 }
1562
1563 rd->len_left = rd->len;
1564 /* read the header first when the frame length is unknown */
1565 sdio_claim_host(bus->sdiodev->func[1]);
1566 if (!rd->len) {
1567 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1568 bus->sdiodev->sbwad,
1569 SDIO_FUNC_2, F2SYNC,
1570 bus->rxhdr,
1571 BRCMF_FIRSTREAD);
1572 bus->sdcnt.f2rxhdrs++;
1573 if (ret < 0) {
1574 brcmf_err("RXHEADER FAILED: %d\n",
1575 ret);
1576 bus->sdcnt.rx_hdrfail++;
1577 brcmf_sdbrcm_rxfail(bus, true, true);
1578 sdio_release_host(bus->sdiodev->func[1]);
1579 continue;
1580 }
1581
1582 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1583 bus->rxhdr, SDPCM_HDRLEN,
1584 "RxHdr:\n");
1585
1586 if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
1587 BRCMF_SDIO_FT_NORMAL)) {
1588 sdio_release_host(bus->sdiodev->func[1]);
1589 if (!bus->rxpending)
1590 break;
1591 else
1592 continue;
1593 }
1594
1595 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1596 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1597 rd->len,
1598 rd->dat_offset);
1599 /* prepare the descriptor for the next read */
1600 rd->len = rd->len_nxtfrm << 4;
1601 rd->len_nxtfrm = 0;
1602 /* treat all packets as events if we don't know */
1603 rd->channel = SDPCM_EVENT_CHANNEL;
1604 sdio_release_host(bus->sdiodev->func[1]);
1605 continue;
1606 }
1607 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1608 rd->len - BRCMF_FIRSTREAD : 0;
1609 head_read = BRCMF_FIRSTREAD;
1610 }
1611
1612 brcmf_pad(bus, &pad, &rd->len_left);
1613
1614 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1615 BRCMF_SDALIGN);
1616 if (!pkt) {
1617 /* Give up on data, request rtx of events */
1618 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1619 brcmf_sdbrcm_rxfail(bus, false,
1620 RETRYCHAN(rd->channel));
1621 sdio_release_host(bus->sdiodev->func[1]);
1622 continue;
1623 }
1624 skb_pull(pkt, head_read);
1625 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1626
1627 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1628 SDIO_FUNC_2, F2SYNC, pkt);
1629 bus->sdcnt.f2rxdata++;
1630 sdio_release_host(bus->sdiodev->func[1]);
1631
1632 if (ret < 0) {
1633 brcmf_err("read %d bytes from channel %d failed: %d\n",
1634 rd->len, rd->channel, ret);
1635 brcmu_pkt_buf_free_skb(pkt);
1636 sdio_claim_host(bus->sdiodev->func[1]);
1637 brcmf_sdbrcm_rxfail(bus, true,
1638 RETRYCHAN(rd->channel));
1639 sdio_release_host(bus->sdiodev->func[1]);
1640 continue;
1641 }
1642
1643 if (head_read) {
1644 skb_push(pkt, head_read);
1645 memcpy(pkt->data, bus->rxhdr, head_read);
1646 head_read = 0;
1647 } else {
1648 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1649 rd_new.seq_num = rd->seq_num;
1650 sdio_claim_host(bus->sdiodev->func[1]);
1651 if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
1652 BRCMF_SDIO_FT_NORMAL)) {
1653 rd->len = 0;
1654 brcmu_pkt_buf_free_skb(pkt);
1655 }
1656 bus->sdcnt.rx_readahead_cnt++;
1657 if (rd->len != roundup(rd_new.len, 16)) {
1658 brcmf_err("frame length mismatch:read %d, should be %d\n",
1659 rd->len,
1660 roundup(rd_new.len, 16) >> 4);
1661 rd->len = 0;
1662 brcmf_sdbrcm_rxfail(bus, true, true);
1663 sdio_release_host(bus->sdiodev->func[1]);
1664 brcmu_pkt_buf_free_skb(pkt);
1665 continue;
1666 }
1667 sdio_release_host(bus->sdiodev->func[1]);
1668 rd->len_nxtfrm = rd_new.len_nxtfrm;
1669 rd->channel = rd_new.channel;
1670 rd->dat_offset = rd_new.dat_offset;
1671
1672 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1673 BRCMF_DATA_ON()) &&
1674 BRCMF_HDRS_ON(),
1675 bus->rxhdr, SDPCM_HDRLEN,
1676 "RxHdr:\n");
1677
1678 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1679 brcmf_err("readahead on control packet %d?\n",
1680 rd_new.seq_num);
1681 /* Force retry w/normal header read */
1682 rd->len = 0;
1683 sdio_claim_host(bus->sdiodev->func[1]);
1684 brcmf_sdbrcm_rxfail(bus, false, true);
1685 sdio_release_host(bus->sdiodev->func[1]);
1686 brcmu_pkt_buf_free_skb(pkt);
1687 continue;
1688 }
1689 }
1690
1691 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1692 pkt->data, rd->len, "Rx Data:\n");
1693
1694 /* Save superframe descriptor and allocate packet frame */
1695 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1696 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
1697 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1698 rd->len);
1699 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1700 pkt->data, rd->len,
1701 "Glom Data:\n");
1702 __skb_trim(pkt, rd->len);
1703 skb_pull(pkt, SDPCM_HDRLEN);
1704 bus->glomd = pkt;
1705 } else {
1706 brcmf_err("%s: glom superframe w/o "
1707 "descriptor!\n", __func__);
1708 sdio_claim_host(bus->sdiodev->func[1]);
1709 brcmf_sdbrcm_rxfail(bus, false, false);
1710 sdio_release_host(bus->sdiodev->func[1]);
1711 }
1712 /* prepare the descriptor for the next read */
1713 rd->len = rd->len_nxtfrm << 4;
1714 rd->len_nxtfrm = 0;
1715 /* treat all packets as events if we don't know */
1716 rd->channel = SDPCM_EVENT_CHANNEL;
1717 continue;
1718 }
1719
1720 /* Fill in packet len and prio, deliver upward */
1721 __skb_trim(pkt, rd->len);
1722 skb_pull(pkt, rd->dat_offset);
1723
1724 /* prepare the descriptor for the next read */
1725 rd->len = rd->len_nxtfrm << 4;
1726 rd->len_nxtfrm = 0;
1727 /* treat all packets as events if we don't know */
1728 rd->channel = SDPCM_EVENT_CHANNEL;
1729
1730 if (pkt->len == 0) {
1731 brcmu_pkt_buf_free_skb(pkt);
1732 continue;
1733 }
1734
1735 skb_queue_head_init(&pktlist);
1736 skb_queue_tail(&pktlist, pkt);
1737 brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
1738 }
1739
1740 rxcount = maxframes - rxleft;
1741 /* Message if we hit the limit */
1742 if (!rxleft)
1743 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
1744 else
1745 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
1746 /* Back off rxseq if awaiting rtx, update rx_seq */
1747 if (bus->rxskip)
1748 rd->seq_num--;
1749 bus->rx_seq = rd->seq_num;
1750
1751 return rxcount;
1752 }
1753
1754 static void
1755 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1756 {
1757 if (waitqueue_active(&bus->ctrl_wait))
1758 wake_up_interruptible(&bus->ctrl_wait);
1759 return;
1760 }
1761
1762 /* Writes a HW/SW header into the packet and sends it. */
1763 /* Assumes: (a) header space already there, (b) caller holds lock */
1764 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1765 uint chan)
1766 {
1767 int ret;
1768 u8 *frame;
1769 u16 len, pad = 0;
1770 u32 swheader;
1771 int i;
1772
1773 brcmf_dbg(TRACE, "Enter\n");
1774
1775 frame = (u8 *) (pkt->data);
1776
1777 /* Add alignment padding, allocate new packet if needed */
1778 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1779 if (pad) {
1780 if (skb_headroom(pkt) < pad) {
1781 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
1782 skb_headroom(pkt), pad);
1783 bus->sdiodev->bus_if->tx_realloc++;
1784 ret = skb_cow(pkt, BRCMF_SDALIGN);
1785 if (ret)
1786 goto done;
1787 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1788 }
1789 skb_push(pkt, pad);
1790 frame = (u8 *) (pkt->data);
1791 memset(frame, 0, pad + SDPCM_HDRLEN);
1792 }
1793 /* precondition: pad < BRCMF_SDALIGN */
1794
1795 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
1796 len = (u16) (pkt->len);
1797 *(__le16 *) frame = cpu_to_le16(len);
1798 *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
1799
1800 /* Software tag: channel, sequence number, data offset */
1801 swheader =
1802 ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
1803 (((pad +
1804 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
1805
1806 *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
1807 *(((__le32 *) frame) + 2) = 0;
1808
1809 #ifdef DEBUG
1810 tx_packets[pkt->priority]++;
1811 #endif
1812
1813 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
1814 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1815 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
1816 frame, len, "Tx Frame:\n");
1817 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1818 ((BRCMF_CTL_ON() &&
1819 chan == SDPCM_CONTROL_CHANNEL) ||
1820 (BRCMF_DATA_ON() &&
1821 chan != SDPCM_CONTROL_CHANNEL))) &&
1822 BRCMF_HDRS_ON(),
1823 frame, min_t(u16, len, 16), "TxHdr:\n");
1824
1825 /* Raise len to next SDIO block to eliminate tail command */
1826 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
1827 u16 pad = bus->blocksize - (len % bus->blocksize);
1828 if ((pad <= bus->roundup) && (pad < bus->blocksize))
1829 len += pad;
1830 } else if (len % BRCMF_SDALIGN) {
1831 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
1832 }
1833
1834 /* Some controllers have trouble with odd bytes -- round to even */
1835 if (len & (ALIGNMENT - 1))
1836 len = roundup(len, ALIGNMENT);
1837
1838 sdio_claim_host(bus->sdiodev->func[1]);
1839 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1840 SDIO_FUNC_2, F2SYNC, pkt);
1841 bus->sdcnt.f2txdata++;
1842
1843 if (ret < 0) {
1844 /* On failure, abort the command and terminate the frame */
1845 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
1846 ret);
1847 bus->sdcnt.tx_sderrs++;
1848
1849 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
1850 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1851 SFC_WF_TERM, NULL);
1852 bus->sdcnt.f1regdata++;
1853
1854 for (i = 0; i < 3; i++) {
1855 u8 hi, lo;
1856 hi = brcmf_sdio_regrb(bus->sdiodev,
1857 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1858 lo = brcmf_sdio_regrb(bus->sdiodev,
1859 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1860 bus->sdcnt.f1regdata += 2;
1861 if ((hi == 0) && (lo == 0))
1862 break;
1863 }
1864
1865 }
1866 sdio_release_host(bus->sdiodev->func[1]);
1867 if (ret == 0)
1868 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
1869
1870 done:
1871 /* restore pkt buffer pointer before calling tx complete routine */
1872 skb_pull(pkt, SDPCM_HDRLEN + pad);
1873 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
1874 return ret;
1875 }
1876
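/*
 * Drain the precedence tx queue: dequeue up to maxframes packets that are
 * not blocked by flow control and hand each one to brcmf_sdbrcm_txpkt() on
 * the data channel.  In polled mode the device interrupt status is sampled
 * between frames so pending events are not missed.  Returns the number of
 * frames actually sent.
 */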
1877 static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1878 {
1879 struct sk_buff *pkt;
1880 u32 intstatus = 0;
1881 int ret = 0, prec_out;
1882 uint cnt = 0;
1883 uint datalen;
1884 u8 tx_prec_map;
1885
1886 brcmf_dbg(TRACE, "Enter\n");
1887
1888 tx_prec_map = ~bus->flowcontrol;
1889
1890 /* Send frames until the limit or some other event */
1891 for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
1892 spin_lock_bh(&bus->txqlock);
1893 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
1894 if (pkt == NULL) {
1895 spin_unlock_bh(&bus->txqlock);
1896 break;
1897 }
1898 spin_unlock_bh(&bus->txqlock);
1899 datalen = pkt->len - SDPCM_HDRLEN;
1900
1901 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
1902
1903 /* In poll mode, need to check for other events */
1904 if (!bus->intr && cnt) {
1905 /* Check device status, signal pending interrupt */
1906 sdio_claim_host(bus->sdiodev->func[1]);
1907 ret = r_sdreg32(bus, &intstatus,
1908 offsetof(struct sdpcmd_regs,
1909 intstatus));
1910 sdio_release_host(bus->sdiodev->func[1]);
1911 bus->sdcnt.f2txdata++;
1912 if (ret != 0)
1913 break;
1914 if (intstatus & bus->hostintmask)
1915 atomic_set(&bus->ipend, 1);
1916 }
1917 }
1918
1919 /* Deflow-control stack if needed */
1920 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
1921 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
1922 bus->txoff = false;
1923 brcmf_txflowblock(bus->sdiodev->dev, false);
1924 }
1925
1926 return cnt;
1927 }
1928
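/*
 * Bring the bus down: stop the watchdog thread, mask and clear device
 * interrupts, disable function 2, flush the tx queue, drop any glom state
 * and wake anyone waiting for a control response.
 */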
1929 static void brcmf_sdbrcm_bus_stop(struct device *dev)
1930 {
1931 u32 local_hostintmask;
1932 u8 saveclk;
1933 int err;
1934 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1935 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1936 struct brcmf_sdio *bus = sdiodev->bus;
1937
1938 brcmf_dbg(TRACE, "Enter\n");
1939
1940 if (bus->watchdog_tsk) {
1941 send_sig(SIGTERM, bus->watchdog_tsk, 1);
1942 kthread_stop(bus->watchdog_tsk);
1943 bus->watchdog_tsk = NULL;
1944 }
1945
1946 sdio_claim_host(bus->sdiodev->func[1]);
1947
1948 /* Enable clock for device interrupts */
1949 brcmf_sdbrcm_bus_sleep(bus, false, false);
1950
1951 /* Disable and clear interrupts at the chip level also */
1952 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
1953 local_hostintmask = bus->hostintmask;
1954 bus->hostintmask = 0;
1955
1956 /* Change our idea of bus state */
1957 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1958
1959 /* Force clocks on backplane to be sure F2 interrupt propagates */
1960 saveclk = brcmf_sdio_regrb(bus->sdiodev,
1961 SBSDIO_FUNC1_CHIPCLKCSR, &err);
1962 if (!err) {
1963 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
1964 (saveclk | SBSDIO_FORCE_HT), &err);
1965 }
1966 if (err)
1967 brcmf_err("Failed to force clock for F2: err %d\n", err);
1968
1969 /* Turn off the bus (F2), free any pending packets */
1970 brcmf_dbg(INTR, "disable SDIO interrupts\n");
1971 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
1972 NULL);
1973
1974 /* Clear any pending interrupts now that F2 is disabled */
1975 w_sdreg32(bus, local_hostintmask,
1976 offsetof(struct sdpcmd_regs, intstatus));
1977
1978 /* Turn off the backplane clock (only) */
1979 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
1980 sdio_release_host(bus->sdiodev->func[1]);
1981
1982 /* Clear the data packet queues */
1983 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
1984
1985 /* Clear any held glomming stuff */
1986 if (bus->glomd)
1987 brcmu_pkt_buf_free_skb(bus->glomd);
1988 brcmf_sdbrcm_free_glom(bus);
1989
1990 /* Clear rx control and wake any waiters */
1991 spin_lock_bh(&bus->rxctl_lock);
1992 bus->rxlen = 0;
1993 spin_unlock_bh(&bus->rxctl_lock);
1994 brcmf_sdbrcm_dcmd_resp_wake(bus);
1995
1996 /* Reset some F2 state stuff */
1997 bus->rxskip = false;
1998 bus->tx_seq = bus->rx_seq = 0;
1999 }
2000
2001 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2002 {
2003 unsigned long flags;
2004
2005 if (bus->sdiodev->oob_irq_requested) {
2006 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2007 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2008 enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2009 bus->sdiodev->irq_en = true;
2010 }
2011 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2012 }
2013 }
2014
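/*
 * Read the SDIO-device core intstatus register, mask it with hostintmask,
 * update the flow-control state, write back the asserted bits to clear them
 * in the device, and fold the result into bus->intstatus for the DPC.
 */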
2015 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2016 {
2017 u8 idx;
2018 u32 addr;
2019 unsigned long val;
2020 int n, ret;
2021
2022 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2023 addr = bus->ci->c_inf[idx].base +
2024 offsetof(struct sdpcmd_regs, intstatus);
2025
2026 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2027 bus->sdcnt.f1regdata++;
2028 if (ret != 0)
2029 val = 0;
2030
2031 val &= bus->hostintmask;
2032 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2033
2034 /* Clear interrupts */
2035 if (val) {
2036 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2037 bus->sdcnt.f1regdata++;
2038 }
2039
2040 if (ret) {
2041 atomic_set(&bus->intstatus, 0);
2042 } else if (val) {
2043 for_each_set_bit(n, &val, 32)
2044 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2045 }
2046
2047 return ret;
2048 }
2049
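/*
 * Deferred-processing routine, run from the bus workqueue.  It resolves a
 * pending clock request, gathers interrupt status, services mailbox and
 * flow-control events, reads available rx frames, pushes queued control and
 * data frames, and drops the clock request again when configured for
 * immediate idle.
 */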
2050 static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2051 {
2052 u32 newstatus = 0;
2053 unsigned long intstatus;
2054 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2055 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2056 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2057 int err = 0, n;
2058
2059 brcmf_dbg(TRACE, "Enter\n");
2060
2061 sdio_claim_host(bus->sdiodev->func[1]);
2062
2063 /* If waiting for HTAVAIL, check status */
2064 if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2065 u8 clkctl, devctl = 0;
2066
2067 #ifdef DEBUG
2068 /* Check for inconsistent device control */
2069 devctl = brcmf_sdio_regrb(bus->sdiodev,
2070 SBSDIO_DEVICE_CTL, &err);
2071 if (err) {
2072 brcmf_err("error reading DEVCTL: %d\n", err);
2073 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2074 }
2075 #endif /* DEBUG */
2076
2077 /* Read CSR, if clock on switch to AVAIL, else ignore */
2078 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2079 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2080 if (err) {
2081 brcmf_err("error reading CSR: %d\n",
2082 err);
2083 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2084 }
2085
2086 brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2087 devctl, clkctl);
2088
2089 if (SBSDIO_HTAV(clkctl)) {
2090 devctl = brcmf_sdio_regrb(bus->sdiodev,
2091 SBSDIO_DEVICE_CTL, &err);
2092 if (err) {
2093 brcmf_err("error reading DEVCTL: %d\n",
2094 err);
2095 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2096 }
2097 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2098 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2099 devctl, &err);
2100 if (err) {
2101 brcmf_err("error writing DEVCTL: %d\n",
2102 err);
2103 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2104 }
2105 bus->clkstate = CLK_AVAIL;
2106 }
2107 }
2108
2109 /* Make sure backplane clock is on */
2110 brcmf_sdbrcm_bus_sleep(bus, false, true);
2111
2112 /* Pending interrupt indicates new device status */
2113 if (atomic_read(&bus->ipend) > 0) {
2114 atomic_set(&bus->ipend, 0);
2115 err = brcmf_sdio_intr_rstatus(bus);
2116 }
2117
2118 /* Start with leftover status bits */
2119 intstatus = atomic_xchg(&bus->intstatus, 0);
2120
2121 /* Handle flow-control change: read new state in case our ack
2122 * crossed another change interrupt. If change still set, assume
2123 * FC ON for safety, let next loop through do the debounce.
2124 */
2125 if (intstatus & I_HMB_FC_CHANGE) {
2126 intstatus &= ~I_HMB_FC_CHANGE;
2127 err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2128 offsetof(struct sdpcmd_regs, intstatus));
2129
2130 err = r_sdreg32(bus, &newstatus,
2131 offsetof(struct sdpcmd_regs, intstatus));
2132 bus->sdcnt.f1regdata += 2;
2133 atomic_set(&bus->fcstate,
2134 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2135 intstatus |= (newstatus & bus->hostintmask);
2136 }
2137
2138 /* Handle host mailbox indication */
2139 if (intstatus & I_HMB_HOST_INT) {
2140 intstatus &= ~I_HMB_HOST_INT;
2141 intstatus |= brcmf_sdbrcm_hostmail(bus);
2142 }
2143
2144 sdio_release_host(bus->sdiodev->func[1]);
2145
2146 /* Generally don't ask for these, can get CRC errors... */
2147 if (intstatus & I_WR_OOSYNC) {
2148 brcmf_err("Dongle reports WR_OOSYNC\n");
2149 intstatus &= ~I_WR_OOSYNC;
2150 }
2151
2152 if (intstatus & I_RD_OOSYNC) {
2153 brcmf_err("Dongle reports RD_OOSYNC\n");
2154 intstatus &= ~I_RD_OOSYNC;
2155 }
2156
2157 if (intstatus & I_SBINT) {
2158 brcmf_err("Dongle reports SBINT\n");
2159 intstatus &= ~I_SBINT;
2160 }
2161
2162 /* Would be active due to wake-wlan in gSPI */
2163 if (intstatus & I_CHIPACTIVE) {
2164 brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2165 intstatus &= ~I_CHIPACTIVE;
2166 }
2167
2168 /* Ignore frame indications if rxskip is set */
2169 if (bus->rxskip)
2170 intstatus &= ~I_HMB_FRAME_IND;
2171
2172 /* On frame indication, read available frames */
2173 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2174 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2175 if (!bus->rxpending)
2176 intstatus &= ~I_HMB_FRAME_IND;
2177 rxlimit -= min(framecnt, rxlimit);
2178 }
2179
2180 /* Keep still-pending events for next scheduling */
2181 if (intstatus) {
2182 for_each_set_bit(n, &intstatus, 32)
2183 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2184 }
2185
2186 brcmf_sdbrcm_clrintr(bus);
2187
2188 if (data_ok(bus) && bus->ctrl_frame_stat &&
2189 (bus->clkstate == CLK_AVAIL)) {
2190 int i;
2191
2192 sdio_claim_host(bus->sdiodev->func[1]);
2193 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2194 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2195 (u32) bus->ctrl_frame_len);
2196
2197 if (err < 0) {
2198 /* On failure, abort the command and
2199 terminate the frame */
2200 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2201 err);
2202 bus->sdcnt.tx_sderrs++;
2203
2204 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2205
2206 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2207 SFC_WF_TERM, &err);
2208 bus->sdcnt.f1regdata++;
2209
2210 for (i = 0; i < 3; i++) {
2211 u8 hi, lo;
2212 hi = brcmf_sdio_regrb(bus->sdiodev,
2213 SBSDIO_FUNC1_WFRAMEBCHI,
2214 &err);
2215 lo = brcmf_sdio_regrb(bus->sdiodev,
2216 SBSDIO_FUNC1_WFRAMEBCLO,
2217 &err);
2218 bus->sdcnt.f1regdata += 2;
2219 if ((hi == 0) && (lo == 0))
2220 break;
2221 }
2222
2223 } else {
2224 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2225 }
2226 sdio_release_host(bus->sdiodev->func[1]);
2227 bus->ctrl_frame_stat = false;
2228 brcmf_sdbrcm_wait_event_wakeup(bus);
2229 }
2230 /* Send queued frames (limit 1 if rx may still be pending) */
2231 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2232 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2233 && data_ok(bus)) {
2234 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2235 txlimit;
2236 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2237 txlimit -= framecnt;
2238 }
2239
2240 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2241 brcmf_err("failed backplane access over SDIO, halting operation\n");
2242 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2243 atomic_set(&bus->intstatus, 0);
2244 } else if (atomic_read(&bus->intstatus) ||
2245 atomic_read(&bus->ipend) > 0 ||
2246 (!atomic_read(&bus->fcstate) &&
2247 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2248 data_ok(bus)) || PKT_AVAILABLE()) {
2249 atomic_inc(&bus->dpc_tskcnt);
2250 }
2251
2252 /* If we're done for now, turn off clock request. */
2253 if ((bus->clkstate != CLK_PENDING)
2254 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2255 bus->activity = false;
2256 brcmf_dbg(SDIO, "idle state\n");
2257 sdio_claim_host(bus->sdiodev->func[1]);
2258 brcmf_sdbrcm_bus_sleep(bus, true, false);
2259 sdio_release_host(bus->sdiodev->func[1]);
2260 }
2261 }
2262
2263 static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
2264 {
2265 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2266 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2267 struct brcmf_sdio *bus = sdiodev->bus;
2268
2269 return &bus->txq;
2270 }
2271
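/*
 * Bus transmit entry point: reserve room for the SDPCM header, enqueue the
 * packet by precedence, assert flow control once the queue passes TXHI and
 * schedule the data worker to actually send it.
 */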
2272 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2273 {
2274 int ret = -EBADE;
2275 uint datalen, prec;
2276 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2277 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2278 struct brcmf_sdio *bus = sdiodev->bus;
2279
2280 brcmf_dbg(TRACE, "Enter\n");
2281
2282 datalen = pkt->len;
2283
2284 /* Add space for the header */
2285 skb_push(pkt, SDPCM_HDRLEN);
2286 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2287
2288 prec = prio2prec((pkt->priority & PRIOMASK));
2289
2290 /* Check for existing queue, current flow-control,
2291 pending event, or pending clock */
2292 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2293 bus->sdcnt.fcqueued++;
2294
2295 /* Priority based enq */
2296 spin_lock_bh(&bus->txqlock);
2297 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2298 skb_pull(pkt, SDPCM_HDRLEN);
2299 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2300 brcmf_err("out of bus->txq !!!\n");
2301 ret = -ENOSR;
2302 } else {
2303 ret = 0;
2304 }
2305
2306 if (pktq_len(&bus->txq) >= TXHI) {
2307 bus->txoff = true;
2308 brcmf_txflowblock(bus->sdiodev->dev, true);
2309 }
2310 spin_unlock_bh(&bus->txqlock);
2311
2312 #ifdef DEBUG
2313 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2314 qcount[prec] = pktq_plen(&bus->txq, prec);
2315 #endif
2316
2317 if (atomic_read(&bus->dpc_tskcnt) == 0) {
2318 atomic_inc(&bus->dpc_tskcnt);
2319 queue_work(bus->brcmf_wq, &bus->datawork);
2320 }
2321
2322 return ret;
2323 }
2324
2325 #ifdef DEBUG
2326 #define CONSOLE_LINE_MAX 192
2327
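/*
 * Pull new output from the dongle's console ring buffer in device RAM and
 * print any complete lines through pr_debug().
 */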
2328 static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2329 {
2330 struct brcmf_console *c = &bus->console;
2331 u8 line[CONSOLE_LINE_MAX], ch;
2332 u32 n, idx, addr;
2333 int rv;
2334
2335 /* Don't do anything until FWREADY updates console address */
2336 if (bus->console_addr == 0)
2337 return 0;
2338
2339 /* Read console log struct */
2340 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2341 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2342 sizeof(c->log_le));
2343 if (rv < 0)
2344 return rv;
2345
2346 /* Allocate console buffer (one time only) */
2347 if (c->buf == NULL) {
2348 c->bufsize = le32_to_cpu(c->log_le.buf_size);
2349 c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2350 if (c->buf == NULL)
2351 return -ENOMEM;
2352 }
2353
2354 idx = le32_to_cpu(c->log_le.idx);
2355
2356 /* Protect against corrupt value */
2357 if (idx > c->bufsize)
2358 return -EBADE;
2359
2360 /* Skip reading the console buffer if the index pointer
2361 has not moved */
2362 if (idx == c->last)
2363 return 0;
2364
2365 /* Read the console buffer */
2366 addr = le32_to_cpu(c->log_le.buf);
2367 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2368 if (rv < 0)
2369 return rv;
2370
2371 while (c->last != idx) {
2372 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2373 if (c->last == idx) {
2374 /* This would output a partial line.
2375 * Instead, back up
2376 * the buffer pointer and output this
2377 * line next time around.
2378 */
2379 if (c->last >= n)
2380 c->last -= n;
2381 else
2382 c->last = c->bufsize - n;
2383 goto break2;
2384 }
2385 ch = c->buf[c->last];
2386 c->last = (c->last + 1) % c->bufsize;
2387 if (ch == '\n')
2388 break;
2389 line[n] = ch;
2390 }
2391
2392 if (n > 0) {
2393 if (line[n - 1] == '\r')
2394 n--;
2395 line[n] = 0;
2396 pr_debug("CONSOLE: %s\n", line);
2397 }
2398 }
2399 break2:
2400
2401 return 0;
2402 }
2403 #endif /* DEBUG */
2404
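/*
 * Send a raw (control) frame on function 2.  On error the F2 transfer is
 * aborted, the partial frame is terminated through the frame-control
 * register, and the write-frame byte counters are polled until they drain.
 */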
2405 static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2406 {
2407 int i;
2408 int ret;
2409
2410 bus->ctrl_frame_stat = false;
2411 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2412 SDIO_FUNC_2, F2SYNC, frame, len);
2413
2414 if (ret < 0) {
2415 /* On failure, abort the command and terminate the frame */
2416 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2417 ret);
2418 bus->sdcnt.tx_sderrs++;
2419
2420 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2421
2422 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2423 SFC_WF_TERM, NULL);
2424 bus->sdcnt.f1regdata++;
2425
2426 for (i = 0; i < 3; i++) {
2427 u8 hi, lo;
2428 hi = brcmf_sdio_regrb(bus->sdiodev,
2429 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2430 lo = brcmf_sdio_regrb(bus->sdiodev,
2431 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2432 bus->sdcnt.f1regdata += 2;
2433 if (hi == 0 && lo == 0)
2434 break;
2435 }
2436 return ret;
2437 }
2438
2439 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2440
2441 return ret;
2442 }
2443
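/*
 * Send a control (dcmd) message: prepend the SDPCM hardware and software
 * headers in front of the caller's buffer, then either transmit directly
 * or, when no tx credit is available, park the frame for the DPC and wait.
 */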
2444 static int
2445 brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2446 {
2447 u8 *frame;
2448 u16 len;
2449 u32 swheader;
2450 uint retries = 0;
2451 u8 doff = 0;
2452 int ret = -1;
2453 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2454 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2455 struct brcmf_sdio *bus = sdiodev->bus;
2456
2457 brcmf_dbg(TRACE, "Enter\n");
2458
2459 	/* Back up the pointer to make room for the bus header */
2460 frame = msg - SDPCM_HDRLEN;
2461 len = (msglen += SDPCM_HDRLEN);
2462
2463 /* Add alignment padding (optional for ctl frames) */
2464 doff = ((unsigned long)frame % BRCMF_SDALIGN);
2465 if (doff) {
2466 frame -= doff;
2467 len += doff;
2468 msglen += doff;
2469 memset(frame, 0, doff + SDPCM_HDRLEN);
2470 }
2471 /* precondition: doff < BRCMF_SDALIGN */
2472 doff += SDPCM_HDRLEN;
2473
2474 /* Round send length to next SDIO block */
2475 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2476 u16 pad = bus->blocksize - (len % bus->blocksize);
2477 if ((pad <= bus->roundup) && (pad < bus->blocksize))
2478 len += pad;
2479 } else if (len % BRCMF_SDALIGN) {
2480 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
2481 }
2482
2483 /* Satisfy length-alignment requirements */
2484 if (len & (ALIGNMENT - 1))
2485 len = roundup(len, ALIGNMENT);
2486
2487 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2488
2489 /* Make sure backplane clock is on */
2490 sdio_claim_host(bus->sdiodev->func[1]);
2491 brcmf_sdbrcm_bus_sleep(bus, false, false);
2492 sdio_release_host(bus->sdiodev->func[1]);
2493
2494 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
2495 *(__le16 *) frame = cpu_to_le16((u16) msglen);
2496 *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
2497
2498 /* Software tag: channel, sequence number, data offset */
2499 swheader =
2500 ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
2501 SDPCM_CHANNEL_MASK)
2502 | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
2503 SDPCM_DOFFSET_MASK);
2504 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
2505 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
2506
2507 if (!data_ok(bus)) {
2508 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2509 bus->tx_max, bus->tx_seq);
2510 bus->ctrl_frame_stat = true;
2511 /* Send from dpc */
2512 bus->ctrl_frame_buf = frame;
2513 bus->ctrl_frame_len = len;
2514
2515 wait_event_interruptible_timeout(bus->ctrl_wait,
2516 !bus->ctrl_frame_stat,
2517 msecs_to_jiffies(2000));
2518
2519 if (!bus->ctrl_frame_stat) {
2520 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2521 ret = 0;
2522 } else {
2523 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2524 ret = -1;
2525 }
2526 }
2527
2528 if (ret == -1) {
2529 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2530 frame, len, "Tx Frame:\n");
2531 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2532 BRCMF_HDRS_ON(),
2533 frame, min_t(u16, len, 16), "TxHdr:\n");
2534
2535 do {
2536 sdio_claim_host(bus->sdiodev->func[1]);
2537 ret = brcmf_tx_frame(bus, frame, len);
2538 sdio_release_host(bus->sdiodev->func[1]);
2539 } while (ret < 0 && retries++ < TXRETRIES);
2540 }
2541
2542 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2543 atomic_read(&bus->dpc_tskcnt) == 0) {
2544 bus->activity = false;
2545 sdio_claim_host(bus->sdiodev->func[1]);
2546 brcmf_dbg(INFO, "idle\n");
2547 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2548 sdio_release_host(bus->sdiodev->func[1]);
2549 }
2550
2551 if (ret)
2552 bus->sdcnt.tx_ctlerrs++;
2553 else
2554 bus->sdcnt.tx_ctlpkts++;
2555
2556 return ret ? -EIO : 0;
2557 }
2558
2559 #ifdef DEBUG
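/*
 * The last word of socram initially holds the nvram length token (length in
 * the low 16 bits, its complement in the high 16 bits).  A valid
 * sdpcm_shared pointer is therefore non-zero and must not match that
 * length/~length pattern.
 */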
2560 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2561 {
2562 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2563 }
2564
2565 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2566 struct sdpcm_shared *sh)
2567 {
2568 u32 addr;
2569 int rv;
2570 u32 shaddr = 0;
2571 struct sdpcm_shared_le sh_le;
2572 __le32 addr_le;
2573
2574 shaddr = bus->ci->rambase + bus->ramsize - 4;
2575
2576 /*
2577 * Read last word in socram to determine
2578 * address of sdpcm_shared structure
2579 */
2580 sdio_claim_host(bus->sdiodev->func[1]);
2581 brcmf_sdbrcm_bus_sleep(bus, false, false);
2582 rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2583 sdio_release_host(bus->sdiodev->func[1]);
2584 if (rv < 0)
2585 return rv;
2586
2587 addr = le32_to_cpu(addr_le);
2588
2589 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2590
2591 /*
2592 * Check if addr is valid.
2593 * NVRAM length at the end of memory should have been overwritten.
2594 */
2595 if (!brcmf_sdio_valid_shared_address(addr)) {
2596 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2597 addr);
2598 return -EINVAL;
2599 }
2600
2601 /* Read hndrte_shared structure */
2602 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2603 sizeof(struct sdpcm_shared_le));
2604 if (rv < 0)
2605 return rv;
2606
2607 /* Endianness */
2608 sh->flags = le32_to_cpu(sh_le.flags);
2609 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2610 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2611 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2612 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2613 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2614 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2615
2616 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2617 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2618 SDPCM_SHARED_VERSION,
2619 sh->flags & SDPCM_SHARED_VERSION_MASK);
2620 return -EPROTO;
2621 }
2622
2623 return 0;
2624 }
2625
2626 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2627 struct sdpcm_shared *sh, char __user *data,
2628 size_t count)
2629 {
2630 u32 addr, console_ptr, console_size, console_index;
2631 char *conbuf = NULL;
2632 __le32 sh_val;
2633 int rv;
2634 loff_t pos = 0;
2635 int nbytes = 0;
2636
2637 /* obtain console information from device memory */
2638 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2639 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2640 (u8 *)&sh_val, sizeof(u32));
2641 if (rv < 0)
2642 return rv;
2643 console_ptr = le32_to_cpu(sh_val);
2644
2645 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2646 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2647 (u8 *)&sh_val, sizeof(u32));
2648 if (rv < 0)
2649 return rv;
2650 console_size = le32_to_cpu(sh_val);
2651
2652 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2653 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2654 (u8 *)&sh_val, sizeof(u32));
2655 if (rv < 0)
2656 return rv;
2657 console_index = le32_to_cpu(sh_val);
2658
2659 /* allocate buffer for console data */
2660 if (console_size <= CONSOLE_BUFFER_MAX)
2661 conbuf = vzalloc(console_size+1);
2662
2663 if (!conbuf)
2664 return -ENOMEM;
2665
2666 /* obtain the console data from device */
2667 conbuf[console_size] = '\0';
2668 rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
2669 console_size);
2670 if (rv < 0)
2671 goto done;
2672
2673 rv = simple_read_from_buffer(data, count, &pos,
2674 conbuf + console_index,
2675 console_size - console_index);
2676 if (rv < 0)
2677 goto done;
2678
2679 nbytes = rv;
2680 if (console_index > 0) {
2681 pos = 0;
2682 rv = simple_read_from_buffer(data+nbytes, count, &pos,
2683 conbuf, console_index - 1);
2684 if (rv < 0)
2685 goto done;
2686 rv += nbytes;
2687 }
2688 done:
2689 vfree(conbuf);
2690 return rv;
2691 }
2692
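/*
 * If the firmware recorded a trap, fetch the brcmf_trap_info record from
 * sh->trap_addr and format the register dump into the debugfs read buffer.
 */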
2693 static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2694 char __user *data, size_t count)
2695 {
2696 int error, res;
2697 char buf[350];
2698 struct brcmf_trap_info tr;
2699 loff_t pos = 0;
2700
2701 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
2702 brcmf_dbg(INFO, "no trap in firmware\n");
2703 return 0;
2704 }
2705
2706 error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
2707 sizeof(struct brcmf_trap_info));
2708 if (error < 0)
2709 return error;
2710
2711 res = scnprintf(buf, sizeof(buf),
2712 "dongle trap info: type 0x%x @ epc 0x%08x\n"
2713 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
2714 " lr 0x%08x pc 0x%08x offset 0x%x\n"
2715 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
2716 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
2717 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
2718 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
2719 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
2720 le32_to_cpu(tr.pc), sh->trap_addr,
2721 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
2722 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
2723 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
2724 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
2725
2726 return simple_read_from_buffer(data, count, &pos, buf, res);
2727 }
2728
2729 static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2730 struct sdpcm_shared *sh, char __user *data,
2731 size_t count)
2732 {
2733 int error = 0;
2734 char buf[200];
2735 char file[80] = "?";
2736 char expr[80] = "<???>";
2737 int res;
2738 loff_t pos = 0;
2739
2740 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
2741 brcmf_dbg(INFO, "firmware not built with -assert\n");
2742 return 0;
2743 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
2744 brcmf_dbg(INFO, "no assert in dongle\n");
2745 return 0;
2746 }
2747
2748 	sdio_claim_host(bus->sdiodev->func[1]);
2749 	if (sh->assert_file_addr != 0)
2750 		error = brcmf_sdio_ramrw(bus->sdiodev, false,
2751 					 sh->assert_file_addr, (u8 *)file, 80);
2752 	if (error >= 0 && sh->assert_exp_addr != 0)
2753 		error = brcmf_sdio_ramrw(bus->sdiodev, false,
2754 					 sh->assert_exp_addr, (u8 *)expr, 80);
2755 	/* release the host claim before any error return */
2756 	sdio_release_host(bus->sdiodev->func[1]);
2757 	if (error < 0)
2758 		return error;
2762
2763 res = scnprintf(buf, sizeof(buf),
2764 "dongle assert: %s:%d: assert(%s)\n",
2765 file, sh->assert_line, expr);
2766 return simple_read_from_buffer(data, count, &pos, buf, res);
2767 }
2768
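/*
 * Lightweight health check used after a control-request timeout: read
 * sdpcm_shared and log whether the dongle hit an assertion or a trap.
 */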
2769 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2770 {
2771 int error;
2772 struct sdpcm_shared sh;
2773
2774 error = brcmf_sdio_readshared(bus, &sh);
2775
2776 if (error < 0)
2777 return error;
2778
2779 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
2780 brcmf_dbg(INFO, "firmware not built with -assert\n");
2781 else if (sh.flags & SDPCM_SHARED_ASSERT)
2782 brcmf_err("assertion in dongle\n");
2783
2784 if (sh.flags & SDPCM_SHARED_TRAP)
2785 brcmf_err("firmware trap in dongle\n");
2786
2787 return 0;
2788 }
2789
2790 static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2791 size_t count, loff_t *ppos)
2792 {
2793 int error = 0;
2794 struct sdpcm_shared sh;
2795 int nbytes = 0;
2796 loff_t pos = *ppos;
2797
2798 if (pos != 0)
2799 return 0;
2800
2801 error = brcmf_sdio_readshared(bus, &sh);
2802 if (error < 0)
2803 goto done;
2804
2805 error = brcmf_sdio_assert_info(bus, &sh, data, count);
2806 if (error < 0)
2807 goto done;
2808 nbytes = error;
2809
2810 error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
2811 if (error < 0)
2812 goto done;
2813 nbytes += error;
2814
2815 error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
2816 if (error < 0)
2817 goto done;
2818 nbytes += error;
2819
2820 error = nbytes;
2821 *ppos += nbytes;
2822 done:
2823 return error;
2824 }
2825
2826 static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
2827 size_t count, loff_t *ppos)
2828 {
2829 struct brcmf_sdio *bus = f->private_data;
2830 int res;
2831
2832 	res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
2833 	/* *ppos has already been advanced by brcmf_sdbrcm_died_dump() */
2834 	return (ssize_t)res;
2836 }
2837
2838 static const struct file_operations brcmf_sdio_forensic_ops = {
2839 .owner = THIS_MODULE,
2840 .open = simple_open,
2841 .read = brcmf_sdio_forensic_read
2842 };
2843
2844 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2845 {
2846 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
2847 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
2848
2849 if (IS_ERR_OR_NULL(dentry))
2850 return;
2851
2852 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
2853 &brcmf_sdio_forensic_ops);
2854 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
2855 }
2856 #else
2857 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2858 {
2859 return 0;
2860 }
2861
2862 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2863 {
2864 }
2865 #endif /* DEBUG */
2866
2867 static int
2868 brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2869 {
2870 int timeleft;
2871 uint rxlen = 0;
2872 bool pending;
2873 u8 *buf;
2874 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2875 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2876 struct brcmf_sdio *bus = sdiodev->bus;
2877
2878 brcmf_dbg(TRACE, "Enter\n");
2879
2880 /* Wait until control frame is available */
2881 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
2882
2883 spin_lock_bh(&bus->rxctl_lock);
2884 rxlen = bus->rxlen;
2885 memcpy(msg, bus->rxctl, min(msglen, rxlen));
2886 bus->rxctl = NULL;
2887 buf = bus->rxctl_orig;
2888 bus->rxctl_orig = NULL;
2889 bus->rxlen = 0;
2890 spin_unlock_bh(&bus->rxctl_lock);
2891 vfree(buf);
2892
2893 if (rxlen) {
2894 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
2895 rxlen, msglen);
2896 } else if (timeleft == 0) {
2897 brcmf_err("resumed on timeout\n");
2898 brcmf_sdbrcm_checkdied(bus);
2899 } else if (pending) {
2900 brcmf_dbg(CTL, "cancelled\n");
2901 return -ERESTARTSYS;
2902 } else {
2903 brcmf_dbg(CTL, "resumed for unknown reason?\n");
2904 brcmf_sdbrcm_checkdied(bus);
2905 }
2906
2907 if (rxlen)
2908 bus->sdcnt.rx_ctlpkts++;
2909 else
2910 bus->sdcnt.rx_ctlerrs++;
2911
2912 return rxlen ? (int)rxlen : -ETIMEDOUT;
2913 }
2914
2915 static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
2916 {
2917 struct chip_info *ci = bus->ci;
2918
2919 /* To enter download state, disable ARM and reset SOCRAM.
2920 * To exit download state, simply reset ARM (default is RAM boot).
2921 */
2922 if (enter) {
2923 bus->alp_only = true;
2924
2925 brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
2926 } else {
2927 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
2928 bus->varsz))
2929 return false;
2930
2931 /* Allow HT Clock now that the ARM is running. */
2932 bus->alp_only = false;
2933
2934 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
2935 }
2936
2937 return true;
2938 }
2939
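/*
 * Copy the next chunk (at most len bytes) of the cached firmware image into
 * buf, advance the read pointer and return the number of bytes copied.
 */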
2940 static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
2941 {
2942 if (bus->firmware->size < bus->fw_ptr + len)
2943 len = bus->firmware->size - bus->fw_ptr;
2944
2945 memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
2946 bus->fw_ptr += len;
2947 return len;
2948 }
2949
2950 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
2951 {
2952 int offset;
2953 uint len;
2954 u8 *memblock = NULL, *memptr;
2955 int ret;
2956 u8 idx;
2957
2958 brcmf_dbg(INFO, "Enter\n");
2959
2960 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
2961 &bus->sdiodev->func[2]->dev);
2962 if (ret) {
2963 brcmf_err("Fail to request firmware %d\n", ret);
2964 return ret;
2965 }
2966 bus->fw_ptr = 0;
2967
2968 memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
2969 if (memblock == NULL) {
2970 ret = -ENOMEM;
2971 goto err;
2972 }
2973 if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
2974 memptr += (BRCMF_SDALIGN -
2975 ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
2976
2977 offset = bus->ci->rambase;
2978
2979 /* Download image */
2980 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
2981 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
2982 if (BRCMF_MAX_CORENUM != idx)
2983 memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
2984 while (len) {
2985 ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
2986 if (ret) {
2987 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
2988 ret, MEMBLOCK, offset);
2989 goto err;
2990 }
2991
2992 offset += MEMBLOCK;
2993 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
2994 }
2995
2996 err:
2997 kfree(memblock);
2998
2999 release_firmware(bus->firmware);
3000 bus->fw_ptr = 0;
3001
3002 return ret;
3003 }
3004
3005 /*
3006  * brcmf_process_nvram_vars: takes a buffer of "<var>=<value>\n" lines read
3007  * from the nvram file and ending in a NUL.
3008 * Removes carriage returns, empty lines, comment lines, and converts
3009 * newlines to NULs.
3010 * Shortens buffer as needed and pads with NULs. End of buffer is marked
3011 * by two NULs.
3012 */
3013
3014 static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3015 {
3016 char *varbuf;
3017 char *dp;
3018 bool findNewline;
3019 int column;
3020 int ret = 0;
3021 uint buf_len, n, len;
3022
3023 len = bus->firmware->size;
3024 varbuf = vmalloc(len);
3025 if (!varbuf)
3026 return -ENOMEM;
3027
3028 memcpy(varbuf, bus->firmware->data, len);
3029 dp = varbuf;
3030
3031 findNewline = false;
3032 column = 0;
3033
3034 for (n = 0; n < len; n++) {
3035 if (varbuf[n] == 0)
3036 break;
3037 if (varbuf[n] == '\r')
3038 continue;
3039 if (findNewline && varbuf[n] != '\n')
3040 continue;
3041 findNewline = false;
3042 if (varbuf[n] == '#') {
3043 findNewline = true;
3044 continue;
3045 }
3046 if (varbuf[n] == '\n') {
3047 if (column == 0)
3048 continue;
3049 *dp++ = 0;
3050 column = 0;
3051 continue;
3052 }
3053 *dp++ = varbuf[n];
3054 column++;
3055 }
3056 buf_len = dp - varbuf;
3057 while (dp < varbuf + n)
3058 *dp++ = 0;
3059
3060 kfree(bus->vars);
3061 /* roundup needed for download to device */
3062 bus->varsz = roundup(buf_len + 1, 4);
3063 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3064 if (bus->vars == NULL) {
3065 bus->varsz = 0;
3066 ret = -ENOMEM;
3067 goto err;
3068 }
3069
3070 /* copy the processed variables and add null termination */
3071 memcpy(bus->vars, varbuf, buf_len);
3072 bus->vars[buf_len] = 0;
3073 err:
3074 vfree(varbuf);
3075 return ret;
3076 }
3077
3078 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3079 {
3080 int ret;
3081
3082 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3083 &bus->sdiodev->func[2]->dev);
3084 if (ret) {
3085 brcmf_err("Fail to request nvram %d\n", ret);
3086 return ret;
3087 }
3088
3089 ret = brcmf_process_nvram_vars(bus);
3090
3091 release_firmware(bus->firmware);
3092
3093 return ret;
3094 }
3095
3096 static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3097 {
3098 int bcmerror = -1;
3099
3100 /* Keep arm in reset */
3101 if (!brcmf_sdbrcm_download_state(bus, true)) {
3102 brcmf_err("error placing ARM core in reset\n");
3103 goto err;
3104 }
3105
3106 if (brcmf_sdbrcm_download_code_file(bus)) {
3107 brcmf_err("dongle image file download failed\n");
3108 goto err;
3109 }
3110
3111 if (brcmf_sdbrcm_download_nvram(bus)) {
3112 brcmf_err("dongle nvram file download failed\n");
3113 goto err;
3114 }
3115
3116 /* Take arm out of reset */
3117 if (!brcmf_sdbrcm_download_state(bus, false)) {
3118 brcmf_err("error getting out of ARM core reset\n");
3119 goto err;
3120 }
3121
3122 bcmerror = 0;
3123
3124 err:
3125 return bcmerror;
3126 }
3127
3128 static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
3129 {
3130 u32 addr, reg;
3131
3132 brcmf_dbg(TRACE, "Enter\n");
3133
3134 /* old chips with PMU version less than 17 don't support save restore */
3135 if (bus->ci->pmurev < 17)
3136 return false;
3137
3138 /* read PMU chipcontrol register 3*/
3139 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3140 brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
3141 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3142 reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
3143
3144 return (bool)reg;
3145 }
3146
3147 static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
3148 {
3149 int err = 0;
3150 u8 val;
3151
3152 brcmf_dbg(TRACE, "Enter\n");
3153
3154 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3155 &err);
3156 if (err) {
3157 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3158 return;
3159 }
3160
3161 val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3162 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3163 val, &err);
3164 if (err) {
3165 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3166 return;
3167 }
3168
3169 /* Add CMD14 Support */
3170 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3171 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3172 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3173 &err);
3174 if (err) {
3175 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3176 return;
3177 }
3178
3179 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3180 SBSDIO_FORCE_HT, &err);
3181 if (err) {
3182 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3183 return;
3184 }
3185
3186 /* set flag */
3187 bus->sr_enabled = true;
3188 brcmf_dbg(INFO, "SR enabled\n");
3189 }
3190
3191 /* enable KSO bit */
3192 static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
3193 {
3194 u8 val;
3195 int err = 0;
3196
3197 brcmf_dbg(TRACE, "Enter\n");
3198
3199 /* KSO bit added in SDIO core rev 12 */
3200 if (bus->ci->c_inf[1].rev < 12)
3201 return 0;
3202
3203 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3204 &err);
3205 if (err) {
3206 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3207 return err;
3208 }
3209
3210 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3211 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3212 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3213 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3214 val, &err);
3215 if (err) {
3216 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3217 return err;
3218 }
3219 }
3220
3221 return 0;
3222 }
3223
3224
3225 static bool
3226 brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3227 {
3228 bool ret;
3229
3230 sdio_claim_host(bus->sdiodev->func[1]);
3231
3232 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3233
3234 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3235
3236 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3237
3238 sdio_release_host(bus->sdiodev->func[1]);
3239
3240 return ret;
3241 }
3242
3243 static int brcmf_sdbrcm_bus_init(struct device *dev)
3244 {
3245 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3246 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3247 struct brcmf_sdio *bus = sdiodev->bus;
3248 unsigned long timeout;
3249 u8 ready, enable;
3250 int err, ret = 0;
3251 u8 saveclk;
3252
3253 brcmf_dbg(TRACE, "Enter\n");
3254
3255 /* try to download image and nvram to the dongle */
3256 if (bus_if->state == BRCMF_BUS_DOWN) {
3257 if (!(brcmf_sdbrcm_download_firmware(bus)))
3258 return -1;
3259 }
3260
3261 if (!bus->sdiodev->bus_if->drvr)
3262 return 0;
3263
3264 /* Start the watchdog timer */
3265 bus->sdcnt.tickcnt = 0;
3266 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3267
3268 sdio_claim_host(bus->sdiodev->func[1]);
3269
3270 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3271 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3272 if (bus->clkstate != CLK_AVAIL)
3273 goto exit;
3274
3275 /* Force clocks on backplane to be sure F2 interrupt propagates */
3276 saveclk = brcmf_sdio_regrb(bus->sdiodev,
3277 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3278 if (!err) {
3279 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3280 (saveclk | SBSDIO_FORCE_HT), &err);
3281 }
3282 if (err) {
3283 brcmf_err("Failed to force clock for F2: err %d\n", err);
3284 goto exit;
3285 }
3286
3287 /* Enable function 2 (frame transfers) */
3288 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3289 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3290 enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
3291
3292 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3293
3294 timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
3295 ready = 0;
3296 while (enable != ready) {
3297 ready = brcmf_sdio_regrb(bus->sdiodev,
3298 SDIO_CCCR_IORx, NULL);
3299 if (time_after(jiffies, timeout))
3300 break;
3301 		else if (time_after(jiffies, timeout -
3302 				    msecs_to_jiffies(BRCMF_WAIT_F2RDY - 50)))
3303 			/* prevent busy waiting if it takes too long */
3304 			msleep_interruptible(20);
3304 }
3305
3306 brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
3307
3308 /* If F2 successfully enabled, set core and enable interrupts */
3309 if (ready == enable) {
3310 /* Set up the interrupt mask and enable interrupts */
3311 bus->hostintmask = HOSTINTMASK;
3312 w_sdreg32(bus, bus->hostintmask,
3313 offsetof(struct sdpcmd_regs, hostintmask));
3314
3315 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3316 } else {
3317 /* Disable F2 again */
3318 enable = SDIO_FUNC_ENABLE_1;
3319 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3320 ret = -ENODEV;
3321 }
3322
3323 if (brcmf_sdbrcm_sr_capable(bus)) {
3324 brcmf_sdbrcm_sr_init(bus);
3325 } else {
3326 /* Restore previous clock setting */
3327 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3328 saveclk, &err);
3329 }
3330
3331 if (ret == 0) {
3332 ret = brcmf_sdio_intr_register(bus->sdiodev);
3333 if (ret != 0)
3334 brcmf_err("intr register failed:%d\n", ret);
3335 }
3336
3337 /* If we didn't come up, turn off backplane clock */
3338 if (bus_if->state != BRCMF_BUS_DATA)
3339 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3340
3341 exit:
3342 sdio_release_host(bus->sdiodev->func[1]);
3343
3344 return ret;
3345 }
3346
3347 void brcmf_sdbrcm_isr(void *arg)
3348 {
3349 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3350
3351 brcmf_dbg(TRACE, "Enter\n");
3352
3353 if (!bus) {
3354 brcmf_err("bus is null pointer, exiting\n");
3355 return;
3356 }
3357
3358 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3359 brcmf_err("bus is down. we have nothing to do\n");
3360 return;
3361 }
3362 /* Count the interrupt call */
3363 bus->sdcnt.intrcount++;
3364 	if (in_interrupt())
3365 		atomic_set(&bus->ipend, 1);
3366 	else if (brcmf_sdio_intr_rstatus(bus)) {
3367 		brcmf_err("failed backplane access\n");
3368 		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3369 	}
3371
3372 /* Disable additional interrupts (is this needed now)? */
3373 if (!bus->intr)
3374 brcmf_err("isr w/o interrupt configured!\n");
3375
3376 atomic_inc(&bus->dpc_tskcnt);
3377 queue_work(bus->brcmf_wq, &bus->datawork);
3378 }
3379
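/*
 * Periodic watchdog work, driven by the watchdog thread: poll the device
 * when polling is enabled and interrupts appear idle, read the firmware
 * console in DEBUG builds, and put the bus to sleep once the idle timeout
 * expires.
 */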
3380 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3381 {
3382 #ifdef DEBUG
3383 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3384 #endif /* DEBUG */
3385
3386 brcmf_dbg(TIMER, "Enter\n");
3387
3388 /* Poll period: check device if appropriate. */
3389 if (!bus->sr_enabled &&
3390 bus->poll && (++bus->polltick >= bus->pollrate)) {
3391 u32 intstatus = 0;
3392
3393 /* Reset poll tick */
3394 bus->polltick = 0;
3395
3396 /* Check device if no interrupts */
3397 if (!bus->intr ||
3398 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3399
3400 if (atomic_read(&bus->dpc_tskcnt) == 0) {
3401 u8 devpend;
3402
3403 sdio_claim_host(bus->sdiodev->func[1]);
3404 devpend = brcmf_sdio_regrb(bus->sdiodev,
3405 SDIO_CCCR_INTx,
3406 NULL);
3407 sdio_release_host(bus->sdiodev->func[1]);
3408 intstatus =
3409 devpend & (INTR_STATUS_FUNC1 |
3410 INTR_STATUS_FUNC2);
3411 }
3412
3413 /* If there is something, make like the ISR and
3414 schedule the DPC */
3415 if (intstatus) {
3416 bus->sdcnt.pollcnt++;
3417 atomic_set(&bus->ipend, 1);
3418
3419 atomic_inc(&bus->dpc_tskcnt);
3420 queue_work(bus->brcmf_wq, &bus->datawork);
3421 }
3422 }
3423
3424 /* Update interrupt tracking */
3425 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3426 }
3427 #ifdef DEBUG
3428 /* Poll for console output periodically */
3429 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3430 bus->console_interval != 0) {
3431 bus->console.count += BRCMF_WD_POLL_MS;
3432 if (bus->console.count >= bus->console_interval) {
3433 bus->console.count -= bus->console_interval;
3434 sdio_claim_host(bus->sdiodev->func[1]);
3435 /* Make sure backplane clock is on */
3436 brcmf_sdbrcm_bus_sleep(bus, false, false);
3437 if (brcmf_sdbrcm_readconsole(bus) < 0)
3438 /* stop on error */
3439 bus->console_interval = 0;
3440 sdio_release_host(bus->sdiodev->func[1]);
3441 }
3442 }
3443 #endif /* DEBUG */
3444
3445 /* On idle timeout clear activity flag and/or turn off clock */
3446 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3447 if (++bus->idlecount >= bus->idletime) {
3448 bus->idlecount = 0;
3449 if (bus->activity) {
3450 bus->activity = false;
3451 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3452 } else {
3453 brcmf_dbg(SDIO, "idle\n");
3454 sdio_claim_host(bus->sdiodev->func[1]);
3455 brcmf_sdbrcm_bus_sleep(bus, true, false);
3456 sdio_release_host(bus->sdiodev->func[1]);
3457 }
3458 }
3459 }
3460
3461 return (atomic_read(&bus->ipend) > 0);
3462 }
3463
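/* Workqueue handler: keep running the DPC while work is pending. */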
3464 static void brcmf_sdio_dataworker(struct work_struct *work)
3465 {
3466 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3467 datawork);
3468
3469 while (atomic_read(&bus->dpc_tskcnt)) {
3470 brcmf_sdbrcm_dpc(bus);
3471 atomic_dec(&bus->dpc_tskcnt);
3472 }
3473 }
3474
3475 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3476 {
3477 brcmf_dbg(TRACE, "Enter\n");
3478
3479 kfree(bus->rxbuf);
3480 bus->rxctl = bus->rxbuf = NULL;
3481 bus->rxlen = 0;
3482 }
3483
3484 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3485 {
3486 brcmf_dbg(TRACE, "Enter\n");
3487
3488 if (bus->sdiodev->bus_if->maxctl) {
3489 bus->rxblen =
3490 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
3491 ALIGNMENT) + BRCMF_SDALIGN;
3492 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3493 if (!(bus->rxbuf))
3494 return false;
3495 }
3496
3497 return true;
3498 }
3499
3500 static bool
3501 brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3502 {
3503 u8 clkctl = 0;
3504 int err = 0;
3505 int reg_addr;
3506 u32 reg_val;
3507 u32 drivestrength;
3508
3509 bus->alp_only = true;
3510
3511 sdio_claim_host(bus->sdiodev->func[1]);
3512
3513 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3514 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3515
3516 /*
3517 * Force PLL off until brcmf_sdio_chip_attach()
3518 * programs PLL control regs
3519 */
3520
3521 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3522 BRCMF_INIT_CLKCTL1, &err);
3523 if (!err)
3524 clkctl = brcmf_sdio_regrb(bus->sdiodev,
3525 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3526
3527 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3528 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3529 err, BRCMF_INIT_CLKCTL1, clkctl);
3530 goto fail;
3531 }
3532
3533 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
3534 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3535 goto fail;
3536 }
3537
3538 if (brcmf_sdbrcm_kso_init(bus)) {
3539 brcmf_err("error enabling KSO\n");
3540 goto fail;
3541 }
3542
3543 if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3544 drivestrength = bus->sdiodev->pdata->drive_strength;
3545 else
3546 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3547 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3548
3549 /* Get info on the SOCRAM cores... */
3550 bus->ramsize = bus->ci->ramsize;
3551 if (!(bus->ramsize)) {
3552 brcmf_err("failed to find SOCRAM memory!\n");
3553 goto fail;
3554 }
3555
3556 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3557 reg_val = brcmf_sdio_regrb(bus->sdiodev,
3558 SDIO_CCCR_BRCM_CARDCTRL, &err);
3559 if (err)
3560 goto fail;
3561
3562 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3563
3564 brcmf_sdio_regwb(bus->sdiodev,
3565 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3566 if (err)
3567 goto fail;
3568
3569 /* set PMUControl so a backplane reset does PMU state reload */
3570 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
3571 pmucontrol);
3572 reg_val = brcmf_sdio_regrl(bus->sdiodev,
3573 reg_addr,
3574 &err);
3575 if (err)
3576 goto fail;
3577
3578 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3579
3580 brcmf_sdio_regwl(bus->sdiodev,
3581 reg_addr,
3582 reg_val,
3583 &err);
3584 if (err)
3585 goto fail;
3586
3587
3588 sdio_release_host(bus->sdiodev->func[1]);
3589
3590 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3591
3592 /* Locate an appropriately-aligned portion of hdrbuf */
3593 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3594 BRCMF_SDALIGN);
3595
3596 /* Set the poll and/or interrupt flags */
3597 bus->intr = true;
3598 bus->poll = false;
3599 if (bus->poll)
3600 bus->pollrate = 1;
3601
3602 return true;
3603
3604 fail:
3605 sdio_release_host(bus->sdiodev->func[1]);
3606 return false;
3607 }
3608
3609 static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3610 {
3611 brcmf_dbg(TRACE, "Enter\n");
3612
3613 sdio_claim_host(bus->sdiodev->func[1]);
3614
3615 /* Disable F2 to clear any intermediate frame state on the dongle */
3616 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
3617 SDIO_FUNC_ENABLE_1, NULL);
3618
3619 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3620 bus->rxflow = false;
3621
3622 /* Done with backplane-dependent accesses, can drop clock... */
3623 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
3624
3625 sdio_release_host(bus->sdiodev->func[1]);
3626
3627 /* ...and initialize clock/power states */
3628 bus->clkstate = CLK_SDONLY;
3629 bus->idletime = BRCMF_IDLE_INTERVAL;
3630 bus->idleclock = BRCMF_IDLE_ACTIVE;
3631
3632 /* Query the F2 block size, set roundup accordingly */
3633 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
3634 bus->roundup = min(max_roundup, bus->blocksize);
3635
3636 /* SR state */
3637 bus->sleeping = false;
3638 bus->sr_enabled = false;
3639
3640 return true;
3641 }
3642
3643 static int
3644 brcmf_sdbrcm_watchdog_thread(void *data)
3645 {
3646 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3647
3648 allow_signal(SIGTERM);
3649 /* Run until signal received */
3650 while (1) {
3651 if (kthread_should_stop())
3652 break;
3653 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3654 brcmf_sdbrcm_bus_watchdog(bus);
3655 /* Count the tick for reference */
3656 bus->sdcnt.tickcnt++;
3657 } else
3658 break;
3659 }
3660 return 0;
3661 }
3662
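/*
 * Timer callback: kick the watchdog thread via its completion and, while
 * the timer is still valid, re-arm it for the next poll interval.
 */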
3663 static void
3664 brcmf_sdbrcm_watchdog(unsigned long data)
3665 {
3666 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3667
3668 if (bus->watchdog_tsk) {
3669 complete(&bus->watchdog_wait);
3670 /* Reschedule the watchdog */
3671 if (bus->wd_timer_valid)
3672 mod_timer(&bus->timer,
3673 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
3674 }
3675 }
3676
3677 static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3678 {
3679 brcmf_dbg(TRACE, "Enter\n");
3680
3681 if (bus->ci) {
3682 sdio_claim_host(bus->sdiodev->func[1]);
3683 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3684 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3685 sdio_release_host(bus->sdiodev->func[1]);
3686 brcmf_sdio_chip_detach(&bus->ci);
3687 		kfree(bus->vars);
3688 		bus->vars = NULL;
3690 }
3691
3692 brcmf_dbg(TRACE, "Disconnected\n");
3693 }
3694
3695 /* Detach and free everything */
3696 static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3697 {
3698 brcmf_dbg(TRACE, "Enter\n");
3699
3700 if (bus) {
3701 /* De-register interrupt handler */
3702 brcmf_sdio_intr_unregister(bus->sdiodev);
3703
3704 cancel_work_sync(&bus->datawork);
3705 if (bus->brcmf_wq)
3706 destroy_workqueue(bus->brcmf_wq);
3707
3708 if (bus->sdiodev->bus_if->drvr) {
3709 brcmf_detach(bus->sdiodev->dev);
3710 brcmf_sdbrcm_release_dongle(bus);
3711 }
3712
3713 brcmf_sdbrcm_release_malloc(bus);
3714
3715 kfree(bus);
3716 }
3717
3718 brcmf_dbg(TRACE, "Disconnected\n");
3719 }
3720
3721 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3722 .stop = brcmf_sdbrcm_bus_stop,
3723 .init = brcmf_sdbrcm_bus_init,
3724 .txdata = brcmf_sdbrcm_bus_txdata,
3725 .txctl = brcmf_sdbrcm_bus_txctl,
3726 .rxctl = brcmf_sdbrcm_bus_rxctl,
3727 .gettxq = brcmf_sdbrcm_bus_gettxq,
3728 };
3729
3730 void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3731 {
3732 int ret;
3733 struct brcmf_sdio *bus;
3734 struct brcmf_bus_dcmd *dlst;
3735 u32 dngl_txglom;
3736 u32 dngl_txglomalign;
3737 u8 idx;
3738
3739 brcmf_dbg(TRACE, "Enter\n");
3740
3741 /* We make an assumption about address window mappings:
3742 	 * regsva == SI_ENUM_BASE */
3743
3744 /* Allocate private bus interface state */
3745 bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
3746 if (!bus)
3747 goto fail;
3748
3749 bus->sdiodev = sdiodev;
3750 sdiodev->bus = bus;
3751 skb_queue_head_init(&bus->glom);
3752 bus->txbound = BRCMF_TXBOUND;
3753 bus->rxbound = BRCMF_RXBOUND;
3754 bus->txminmax = BRCMF_TXMINMAX;
3755 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
3756
3757 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3758 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3759 if (bus->brcmf_wq == NULL) {
3760 brcmf_err("insufficient memory to create txworkqueue\n");
3761 goto fail;
3762 }
3763
3764 /* attempt to attach to the dongle */
3765 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
3766 brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
3767 goto fail;
3768 }
3769
3770 spin_lock_init(&bus->rxctl_lock);
3771 spin_lock_init(&bus->txqlock);
3772 init_waitqueue_head(&bus->ctrl_wait);
3773 init_waitqueue_head(&bus->dcmd_resp_wait);
3774
3775 /* Set up the watchdog timer */
3776 init_timer(&bus->timer);
3777 bus->timer.data = (unsigned long)bus;
3778 bus->timer.function = brcmf_sdbrcm_watchdog;
3779
3780 /* Initialize watchdog thread */
3781 init_completion(&bus->watchdog_wait);
3782 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
3783 bus, "brcmf_watchdog");
3784 if (IS_ERR(bus->watchdog_tsk)) {
3785 pr_warn("brcmf_watchdog thread failed to start\n");
3786 bus->watchdog_tsk = NULL;
3787 }
3788 /* Initialize DPC thread */
3789 atomic_set(&bus->dpc_tskcnt, 0);
3790
3791 /* Assign bus interface call back */
3792 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
3793 bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
3794 bus->sdiodev->bus_if->chip = bus->ci->chip;
3795 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
3796
3797 /* Attach to the brcmf/OS/network interface */
3798 ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
3799 if (ret != 0) {
3800 brcmf_err("brcmf_attach failed\n");
3801 goto fail;
3802 }
3803
3804 /* Allocate buffers */
3805 if (!(brcmf_sdbrcm_probe_malloc(bus))) {
3806 brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
3807 goto fail;
3808 }
3809
3810 if (!(brcmf_sdbrcm_probe_init(bus))) {
3811 brcmf_err("brcmf_sdbrcm_probe_init failed\n");
3812 goto fail;
3813 }
3814
3815 brcmf_sdio_debugfs_create(bus);
3816 brcmf_dbg(INFO, "completed!!\n");
3817
3818 /* sdio bus core specific dcmd */
3819 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
3820 dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
3821 if (dlst) {
3822 if (bus->ci->c_inf[idx].rev < 12) {
3823 /* for sdio core rev < 12, disable txgloming */
3824 dngl_txglom = 0;
3825 dlst->name = "bus:txglom";
3826 dlst->param = (char *)&dngl_txglom;
3827 dlst->param_len = sizeof(u32);
3828 } else {
3829 /* otherwise, set txglomalign */
3830 dngl_txglomalign = bus->sdiodev->bus_if->align;
3831 dlst->name = "bus:txglomalign";
3832 dlst->param = (char *)&dngl_txglomalign;
3833 dlst->param_len = sizeof(u32);
3834 }
3835 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
3836 }
3837
3838 /* if firmware path present try to download and bring up bus */
3839 ret = brcmf_bus_start(bus->sdiodev->dev);
3840 if (ret != 0) {
3841 brcmf_err("dongle is not responding\n");
3842 goto fail;
3843 }
3844
3845 return bus;
3846
3847 fail:
3848 brcmf_sdbrcm_release(bus);
3849 return NULL;
3850 }
3851
3852 void brcmf_sdbrcm_disconnect(void *ptr)
3853 {
3854 struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
3855
3856 brcmf_dbg(TRACE, "Enter\n");
3857
3858 if (bus)
3859 brcmf_sdbrcm_release(bus);
3860
3861 brcmf_dbg(TRACE, "Disconnected\n");
3862 }
3863
3864 void
3865 brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
3866 {
3867 /* Totally stop the timer */
3868 if (!wdtick && bus->wd_timer_valid) {
3869 del_timer_sync(&bus->timer);
3870 bus->wd_timer_valid = false;
3871 bus->save_ms = wdtick;
3872 return;
3873 }
3874
3875 /* don't start the wd until fw is loaded */
3876 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
3877 return;
3878
3879 if (wdtick) {
3880 if (bus->save_ms != BRCMF_WD_POLL_MS) {
3881 if (bus->wd_timer_valid)
3882 /* Stop timer and restart at new value */
3883 del_timer_sync(&bus->timer);
3884
3885 /* Create timer again when watchdog period is
3886 dynamically changed or in the first instance
3887 */
3888 bus->timer.expires =
3889 jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
3890 add_timer(&bus->timer);
3891
3892 } else {
3893 			/* Re-arm the timer at the last watchdog period */
3894 mod_timer(&bus->timer,
3895 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
3896 }
3897
3898 bus->wd_timer_valid = true;
3899 bus->save_ms = wdtick;
3900 }
3901 }