brcmfmac: do not proceed if fail to download nvram to dongle
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/printk.h>
21 #include <linux/pci_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/semaphore.h>
29 #include <linux/firmware.h>
30 #include <linux/module.h>
31 #include <linux/bcma/bcma.h>
32 #include <linux/debugfs.h>
33 #include <linux/vmalloc.h>
34 #include <asm/unaligned.h>
35 #include <defs.h>
36 #include <brcmu_wifi.h>
37 #include <brcmu_utils.h>
38 #include <brcm_hw_ids.h>
39 #include <soc.h>
40 #include "sdio_host.h"
41 #include "sdio_chip.h"
42
43 #define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
44
45 #ifdef DEBUG
46
47 #define BRCMF_TRAP_INFO_SIZE 80
48
49 #define CBUF_LEN (128)
50
51 /* Device console log buffer state */
52 #define CONSOLE_BUFFER_MAX 2024
53
54 struct rte_log_le {
55 __le32 buf; /* Can't be pointer on (64-bit) hosts */
56 __le32 buf_size;
57 __le32 idx;
58 char *_buf_compat; /* Redundant pointer for backward compat. */
59 };
60
61 struct rte_console {
62 /* Virtual UART
63 * When there is no UART (e.g. Quickturn),
64 * the host should write a complete
65 * input line directly into cbuf and then write
66 * the length into vcons_in.
67 * This may also be used when there is a real UART
68 * (at risk of conflicting with
69 * the real UART). vcons_out is currently unused.
70 */
71 uint vcons_in;
72 uint vcons_out;
73
74 /* Output (logging) buffer
75 * Console output is written to a ring buffer log_buf at index log_idx.
76 * The host may read the output when it sees log_idx advance.
77 * Output will be lost if the output wraps around faster than the host
78 * polls.
79 */
80 struct rte_log_le log_le;
81
82 /* Console input line buffer
83 * Characters are read one at a time into cbuf
84 * until <CR> is received, then
85 * the buffer is processed as a command line.
86 * Also used for virtual UART.
87 */
88 uint cbuf_idx;
89 char cbuf[CBUF_LEN];
90 };
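/*
 * Illustrative sketch, not part of the driver: one way a host-side
 * poller could drain the log ring buffer described above. The real
 * console code reads rte_log_le over SDIO; here the hypothetical
 * read_log_byte() callback stands in for that transfer so only the
 * ring-index arithmetic is shown.
 */
static void example_drain_console(u32 buf_size, u32 write_idx, u32 *last_idx,
				  u8 (*read_log_byte)(u32 offset))
{
	/* Catch up from our last read position to the dongle's write
	 * index, wrapping at buf_size. Output is lost if the ring wraps
	 * faster than the host polls, as the comment above warns.
	 */
	while (*last_idx != write_idx) {
		u8 ch = read_log_byte(*last_idx);

		if (ch)
			pr_debug("%c", ch);
		*last_idx = (*last_idx + 1) % buf_size;
	}
}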
91
92 #endif /* DEBUG */
93 #include <chipcommon.h>
94
95 #include "dhd_bus.h"
96 #include "dhd_dbg.h"
97
98 #define TXQLEN 2048 /* bulk tx queue length */
99 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
100 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
101 #define PRIOMASK 7
102
103 #define TXRETRIES 2 /* # of retries for tx frames */
104
105 #define BRCMF_RXBOUND 50 /* Default for max rx frames in
106 one scheduling */
107
108 #define BRCMF_TXBOUND 20 /* Default for max tx frames in
109 one scheduling */
110
111 #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
112
113 #define MEMBLOCK 2048 /* Block size used for downloading
114 of dongle image */
115 #define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
116 biggest possible glom */
117
118 #define BRCMF_FIRSTREAD (1 << 6)
119
120
121 /* SBSDIO_DEVICE_CTL */
122
123 /* 1: device will assert busy signal when receiving CMD53 */
124 #define SBSDIO_DEVCTL_SETBUSY 0x01
125 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
126 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
127 /* 1: mask all interrupts to host except the chipActive (rev 8) */
128 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
129 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
130 * sdio bus power cycle to clear (rev 9) */
131 #define SBSDIO_DEVCTL_PADS_ISO 0x08
132 /* Force SD->SB reset mapping (rev 11) */
133 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
134 /* Determined by CoreControl bit */
135 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
136 /* Force backplane reset */
137 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
138 /* Force no backplane reset */
139 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
140
141 /* direct(mapped) cis space */
142
143 /* MAPPED common CIS address */
144 #define SBSDIO_CIS_BASE_COMMON 0x1000
145 /* maximum bytes in one CIS */
146 #define SBSDIO_CIS_SIZE_LIMIT 0x200
147 /* cis offset addr is < 17 bits */
148 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
149
150 /* manfid tuple length, include tuple, link bytes */
151 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
152
153 /* intstatus */
154 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
155 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
156 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
157 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
158 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
159 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
160 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
161 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
162 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
163 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
164 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
165 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
166 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
167 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
168 #define I_PC (1 << 10) /* descriptor error */
169 #define I_PD (1 << 11) /* data error */
170 #define I_DE (1 << 12) /* Descriptor protocol Error */
171 #define I_RU (1 << 13) /* Receive descriptor Underflow */
172 #define I_RO (1 << 14) /* Receive fifo Overflow */
173 #define I_XU (1 << 15) /* Transmit fifo Underflow */
174 #define I_RI (1 << 16) /* Receive Interrupt */
175 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
176 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
177 #define I_XI (1 << 24) /* Transmit Interrupt */
178 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
179 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
180 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
181 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
182 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
183 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
184 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
185 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
186 #define I_DMA (I_RI | I_XI | I_ERRORS)
187
188 /* corecontrol */
189 #define CC_CISRDY (1 << 0) /* CIS Ready */
190 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
191 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
192 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
193 #define CC_XMTDATAAVAIL_MODE (1 << 4)
194 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
195
196 /* SDA_FRAMECTRL */
197 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
198 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
199 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
200 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
201
202 /* HW frame tag */
203 #define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
204
205 /* Total length of frame header for dongle protocol */
206 #define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
207 #define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
208
209 /*
210 * Software allocation of To SB Mailbox resources
211 */
212
213 /* tosbmailbox bits corresponding to intstatus bits */
214 #define SMB_NAK (1 << 0) /* Frame NAK */
215 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
216 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
217 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
218
219 /* tosbmailboxdata */
220 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
221
222 /*
223 * Software allocation of To Host Mailbox resources
224 */
225
226 /* intstatus bits */
227 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
228 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
229 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
230 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
231
232 /* tohostmailboxdata */
233 #define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
234 #define HMB_DATA_DEVREADY 2 /* talk to host after enable */
235 #define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
236 #define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
237
238 #define HMB_DATA_FCDATA_MASK 0xff000000
239 #define HMB_DATA_FCDATA_SHIFT 24
240
241 #define HMB_DATA_VERSION_MASK 0x00ff0000
242 #define HMB_DATA_VERSION_SHIFT 16
243
244 /*
245 * Software-defined protocol header
246 */
247
248 /* Current protocol version */
249 #define SDPCM_PROT_VERSION 4
250
251 /* SW frame header */
252 #define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
253
254 #define SDPCM_CHANNEL_MASK 0x00000f00
255 #define SDPCM_CHANNEL_SHIFT 8
256 #define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
257
258 #define SDPCM_NEXTLEN_OFFSET 2
259
260 /* Data Offset from SOF (HW Tag, SW Tag, Pad) */
261 #define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
262 #define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
263 #define SDPCM_DOFFSET_MASK 0xff000000
264 #define SDPCM_DOFFSET_SHIFT 24
265 #define SDPCM_FCMASK_OFFSET 4 /* Flow control */
266 #define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
267 #define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
268 #define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
269
270 #define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
271
272 /* logical channel numbers */
273 #define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
274 #define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
275 #define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
276 #define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
277 #define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
278
279 #define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
280
281 #define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
282
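/*
 * Illustrative sketch, not part of the driver: the macros above pick the
 * software-header fields out of a received frame once the 4-byte hardware
 * tag has been skipped. The struct below is hypothetical; the driver keeps
 * the same information in struct brcmf_sdio_read further down.
 */
struct example_swhdr {
	u8 seq;		/* frame sequence number */
	u8 channel;	/* logical channel, e.g. SDPCM_DATA_CHANNEL */
	u8 doff;	/* data offset from start of frame */
	u8 window;	/* highest tx sequence currently allowed */
};

static void example_parse_swhdr(u8 *header, struct example_swhdr *hdr)
{
	u8 *sw = header + SDPCM_FRAMETAG_LEN;

	hdr->seq = SDPCM_PACKET_SEQUENCE(sw);
	hdr->channel = SDPCM_PACKET_CHANNEL(sw);
	hdr->doff = SDPCM_DOFFSET_VALUE(sw);
	hdr->window = SDPCM_WINDOW_VALUE(sw);
}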
283 /*
284 * Shared structure between dongle and the host.
285 * The structure contains pointers to trap or assert information.
286 */
287 #define SDPCM_SHARED_VERSION 0x0003
288 #define SDPCM_SHARED_VERSION_MASK 0x00FF
289 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
290 #define SDPCM_SHARED_ASSERT 0x0200
291 #define SDPCM_SHARED_TRAP 0x0400
292
293 /* Space for header read, limit for data packets */
294 #define MAX_HDR_READ (1 << 6)
295 #define MAX_RX_DATASZ 2048
296
297 /* Maximum milliseconds to wait for F2 to come up */
298 #define BRCMF_WAIT_F2RDY 3000
299
300 /* Bump up limit on waiting for HT to account for first startup;
301 * if the image is doing a CRC calculation before programming the PMU
302 * for HT availability, it could take a couple hundred ms more, so
303 * max out at 1 second (1000000 us).
304 */
305 #undef PMU_MAX_TRANSITION_DLY
306 #define PMU_MAX_TRANSITION_DLY 1000000
307
308 /* Value for ChipClockCSR during initial setup */
309 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
310 SBSDIO_ALP_AVAIL_REQ)
311
312 /* Flags for SDH calls */
313 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
314
315 #define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
316 #define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
317 MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
318 MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
319
320 #define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
321 #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
322 * when idle
323 */
324 #define BRCMF_IDLE_INTERVAL 1
325
326 /*
327 * Conversion of 802.1D priority to precedence level
328 */
329 static uint prio2prec(u32 prio)
330 {
331 return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
332 (prio^2) : prio;
333 }
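/*
 * Worked example, assuming PRIO_8021D_BE == 0 and PRIO_8021D_NONE == 2
 * (the values in brcmu_wifi.h): the XOR with 2 swaps those two
 * priorities, so the resulting precedence order is NONE(0) < BK(1) <
 * BE(2) < EE(3) and best-effort traffic ranks above background, as
 * 802.1D intends. Every other priority maps to itself.
 */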
334
335 /* core registers */
336 struct sdpcmd_regs {
337 u32 corecontrol; /* 0x00, rev8 */
338 u32 corestatus; /* rev8 */
339 u32 PAD[1];
340 u32 biststatus; /* rev8 */
341
342 /* PCMCIA access */
343 u16 pcmciamesportaladdr; /* 0x010, rev8 */
344 u16 PAD[1];
345 u16 pcmciamesportalmask; /* rev8 */
346 u16 PAD[1];
347 u16 pcmciawrframebc; /* rev8 */
348 u16 PAD[1];
349 u16 pcmciaunderflowtimer; /* rev8 */
350 u16 PAD[1];
351
352 /* interrupt */
353 u32 intstatus; /* 0x020, rev8 */
354 u32 hostintmask; /* rev8 */
355 u32 intmask; /* rev8 */
356 u32 sbintstatus; /* rev8 */
357 u32 sbintmask; /* rev8 */
358 u32 funcintmask; /* rev4 */
359 u32 PAD[2];
360 u32 tosbmailbox; /* 0x040, rev8 */
361 u32 tohostmailbox; /* rev8 */
362 u32 tosbmailboxdata; /* rev8 */
363 u32 tohostmailboxdata; /* rev8 */
364
365 /* synchronized access to registers in SDIO clock domain */
366 u32 sdioaccess; /* 0x050, rev8 */
367 u32 PAD[3];
368
369 /* PCMCIA frame control */
370 u8 pcmciaframectrl; /* 0x060, rev8 */
371 u8 PAD[3];
372 u8 pcmciawatermark; /* rev8 */
373 u8 PAD[155];
374
375 /* interrupt batching control */
376 u32 intrcvlazy; /* 0x100, rev8 */
377 u32 PAD[3];
378
379 /* counters */
380 u32 cmd52rd; /* 0x110, rev8 */
381 u32 cmd52wr; /* rev8 */
382 u32 cmd53rd; /* rev8 */
383 u32 cmd53wr; /* rev8 */
384 u32 abort; /* rev8 */
385 u32 datacrcerror; /* rev8 */
386 u32 rdoutofsync; /* rev8 */
387 u32 wroutofsync; /* rev8 */
388 u32 writebusy; /* rev8 */
389 u32 readwait; /* rev8 */
390 u32 readterm; /* rev8 */
391 u32 writeterm; /* rev8 */
392 u32 PAD[40];
393 u32 clockctlstatus; /* rev8 */
394 u32 PAD[7];
395
396 u32 PAD[128]; /* DMA engines */
397
398 /* SDIO/PCMCIA CIS region */
399 char cis[512]; /* 0x400-0x5ff, rev6 */
400
401 /* PCMCIA function control registers */
402 char pcmciafcr[256]; /* 0x600-6ff, rev6 */
403 u16 PAD[55];
404
405 /* PCMCIA backplane access */
406 u16 backplanecsr; /* 0x76E, rev6 */
407 u16 backplaneaddr0; /* rev6 */
408 u16 backplaneaddr1; /* rev6 */
409 u16 backplaneaddr2; /* rev6 */
410 u16 backplaneaddr3; /* rev6 */
411 u16 backplanedata0; /* rev6 */
412 u16 backplanedata1; /* rev6 */
413 u16 backplanedata2; /* rev6 */
414 u16 backplanedata3; /* rev6 */
415 u16 PAD[31];
416
417 /* sprom "size" & "blank" info */
418 u16 spromstatus; /* 0x7BE, rev2 */
419 u32 PAD[464];
420
421 u16 PAD[0x80];
422 };
423
424 #ifdef DEBUG
425 /* Device console log buffer state */
426 struct brcmf_console {
427 uint count; /* Poll interval msec counter */
428 uint log_addr; /* Log struct address (fixed) */
429 struct rte_log_le log_le; /* Log struct (host copy) */
430 uint bufsize; /* Size of log buffer */
431 u8 *buf; /* Log buffer (host copy) */
432 uint last; /* Last buffer read index */
433 };
434
435 struct brcmf_trap_info {
436 __le32 type;
437 __le32 epc;
438 __le32 cpsr;
439 __le32 spsr;
440 __le32 r0; /* a1 */
441 __le32 r1; /* a2 */
442 __le32 r2; /* a3 */
443 __le32 r3; /* a4 */
444 __le32 r4; /* v1 */
445 __le32 r5; /* v2 */
446 __le32 r6; /* v3 */
447 __le32 r7; /* v4 */
448 __le32 r8; /* v5 */
449 __le32 r9; /* sb/v6 */
450 __le32 r10; /* sl/v7 */
451 __le32 r11; /* fp/v8 */
452 __le32 r12; /* ip */
453 __le32 r13; /* sp */
454 __le32 r14; /* lr */
455 __le32 pc; /* r15 */
456 };
457 #endif /* DEBUG */
458
459 struct sdpcm_shared {
460 u32 flags;
461 u32 trap_addr;
462 u32 assert_exp_addr;
463 u32 assert_file_addr;
464 u32 assert_line;
465 u32 console_addr; /* Address of struct rte_console */
466 u32 msgtrace_addr;
467 u8 tag[32];
468 u32 brpt_addr;
469 };
470
471 struct sdpcm_shared_le {
472 __le32 flags;
473 __le32 trap_addr;
474 __le32 assert_exp_addr;
475 __le32 assert_file_addr;
476 __le32 assert_line;
477 __le32 console_addr; /* Address of struct rte_console */
478 __le32 msgtrace_addr;
479 u8 tag[32];
480 __le32 brpt_addr;
481 };
482
483 /* SDIO read frame info */
484 struct brcmf_sdio_read {
485 u8 seq_num;
486 u8 channel;
487 u16 len;
488 u16 len_left;
489 u16 len_nxtfrm;
490 u8 dat_offset;
491 };
492
493 /* misc chip info needed by some of the routines */
494 /* Private data for SDIO bus interaction */
495 struct brcmf_sdio {
496 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
497 struct chip_info *ci; /* Chip info struct */
498 char *vars; /* Variables (from CIS and/or other) */
499 uint varsz; /* Size of variables buffer */
500
501 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
502
503 u32 hostintmask; /* Copy of Host Interrupt Mask */
504 atomic_t intstatus; /* Intstatus bits (events) pending */
505 atomic_t fcstate; /* State of dongle flow-control */
506
507 uint blocksize; /* Block size of SDIO transfers */
508 uint roundup; /* Max roundup limit */
509
510 struct pktq txq; /* Queue length used for flow-control */
511 u8 flowcontrol; /* per prio flow control bitmask */
512 u8 tx_seq; /* Transmit sequence number (next) */
513 u8 tx_max; /* Maximum transmit sequence allowed */
514
515 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
516 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
517 u8 rx_seq; /* Receive sequence number (expected) */
518 struct brcmf_sdio_read cur_read;
519 /* info of current read frame */
520 bool rxskip; /* Skip receive (awaiting NAK ACK) */
521 bool rxpending; /* Data frame pending in dongle */
522
523 uint rxbound; /* Rx frames to read before resched */
524 uint txbound; /* Tx frames to send before resched */
525 uint txminmax;
526
527 struct sk_buff *glomd; /* Packet containing glomming descriptor */
528 struct sk_buff_head glom; /* Packet list for glommed superframe */
529 uint glomerr; /* Glom packet read errors */
530
531 u8 *rxbuf; /* Buffer for receiving control packets */
532 uint rxblen; /* Allocated length of rxbuf */
533 u8 *rxctl; /* Aligned pointer into rxbuf */
534 u8 *rxctl_orig; /* pointer for freeing rxctl */
535 u8 *databuf; /* Buffer for receiving big glom packet */
536 u8 *dataptr; /* Aligned pointer into databuf */
537 uint rxlen; /* Length of valid data in buffer */
538 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
539
540 u8 sdpcm_ver; /* Bus protocol reported by dongle */
541
542 bool intr; /* Use interrupts */
543 bool poll; /* Use polling */
544 atomic_t ipend; /* Device interrupt is pending */
545 uint spurious; /* Count of spurious interrupts */
546 uint pollrate; /* Ticks between device polls */
547 uint polltick; /* Tick counter */
548
549 #ifdef DEBUG
550 uint console_interval;
551 struct brcmf_console console; /* Console output polling support */
552 uint console_addr; /* Console address from shared struct */
553 #endif /* DEBUG */
554
555 uint clkstate; /* State of sd and backplane clock(s) */
556 bool activity; /* Activity flag for clock down */
557 s32 idletime; /* Control for activity timeout */
558 s32 idlecount; /* Activity timeout counter */
559 s32 idleclock; /* How to set bus driver when idle */
560 s32 sd_rxchain;
561 bool use_rxchain; /* If brcmf should use PKT chains */
562 bool rxflow_mode; /* Rx flow control mode */
563 bool rxflow; /* Is rx flow control on */
564 bool alp_only; /* Don't use HT clock (ALP only) */
565
566 u8 *ctrl_frame_buf;
567 u32 ctrl_frame_len;
568 bool ctrl_frame_stat;
569
570 spinlock_t txqlock;
571 wait_queue_head_t ctrl_wait;
572 wait_queue_head_t dcmd_resp_wait;
573
574 struct timer_list timer;
575 struct completion watchdog_wait;
576 struct task_struct *watchdog_tsk;
577 bool wd_timer_valid;
578 uint save_ms;
579
580 struct workqueue_struct *brcmf_wq;
581 struct work_struct datawork;
582 struct list_head dpc_tsklst;
583 spinlock_t dpc_tl_lock;
584
585 const struct firmware *firmware;
586 u32 fw_ptr;
587
588 bool txoff; /* Transmit flow-controlled */
589 struct brcmf_sdio_count sdcnt;
590 };
591
592 /* clkstate */
593 #define CLK_NONE 0
594 #define CLK_SDONLY 1
595 #define CLK_PENDING 2 /* Not used yet */
596 #define CLK_AVAIL 3
597
598 #ifdef DEBUG
599 static int qcount[NUMPRIO];
600 static int tx_packets[NUMPRIO];
601 #endif /* DEBUG */
602
603 #define SDIO_DRIVE_STRENGTH 6 /* in milliamps */
604
605 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
606
607 /* Retry count for register access failures */
608 static const uint retry_limit = 2;
609
610 /* Limit on rounding up frames */
611 static const uint max_roundup = 512;
612
613 #define ALIGNMENT 4
614
615 enum brcmf_sdio_frmtype {
616 BRCMF_SDIO_FT_NORMAL,
617 BRCMF_SDIO_FT_SUPER,
618 BRCMF_SDIO_FT_SUB,
619 };
620
621 static void pkt_align(struct sk_buff *p, int len, int align)
622 {
623 uint datalign;
624 datalign = (unsigned long)(p->data);
625 datalign = roundup(datalign, (align)) - datalign;
626 if (datalign)
627 skb_pull(p, datalign);
628 __skb_trim(p, len);
629 }
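/*
 * Illustrative example: with align == 4 and p->data at an address ending
 * in ...0x02, datalign evaluates to 2, so skb_pull() drops two bytes from
 * the head and the payload start becomes 4-byte aligned before the buffer
 * is trimmed to len.
 */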
630
631 /* Check whether there is a tx window offered */
632 static bool data_ok(struct brcmf_sdio *bus)
633 {
634 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
635 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
636 }
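/*
 * Worked example: tx_max and tx_seq are free-running 8-bit sequence
 * numbers, so the u8 subtraction yields the remaining tx credit even
 * across wrap-around. With tx_max == 0x02 and tx_seq == 0xfe the window
 * is (u8)(0x02 - 0xfe) == 4, so four more frames may be sent; a result
 * of zero, or one with bit 7 set (window apparently behind the sequence),
 * means no credit is available.
 */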
637
638 /*
639 * Reads a register in the SDIO hardware block. This block occupies a series of
640 * addresses on the 32-bit backplane bus.
641 */
642 static int
643 r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
644 {
645 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
646 int ret;
647
648 *regvar = brcmf_sdio_regrl(bus->sdiodev,
649 bus->ci->c_inf[idx].base + offset, &ret);
650
651 return ret;
652 }
653
654 static int
655 w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
656 {
657 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
658 int ret;
659
660 brcmf_sdio_regwl(bus->sdiodev,
661 bus->ci->c_inf[idx].base + reg_offset,
662 regval, &ret);
663
664 return ret;
665 }
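/*
 * Illustrative usage, mirroring how these helpers are called later in
 * this file: the offset argument is a byte offset into the SDIO core's
 * register space, normally expressed with offsetof() on struct
 * sdpcmd_regs defined above, e.g.
 *
 *	u32 status;
 *	int err = r_sdreg32(bus, &status,
 *			    offsetof(struct sdpcmd_regs, intstatus));
 *	if (!err)
 *		err = w_sdreg32(bus, status,
 *				offsetof(struct sdpcmd_regs, intstatus));
 */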
666
667 #define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
668
669 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
670
671 /* Turn backplane clock on or off */
672 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
673 {
674 int err;
675 u8 clkctl, clkreq, devctl;
676 unsigned long timeout;
677
678 brcmf_dbg(TRACE, "Enter\n");
679
680 clkctl = 0;
681
682 if (on) {
683 /* Request HT Avail */
684 clkreq =
685 bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
686
687 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
688 clkreq, &err);
689 if (err) {
690 brcmf_err("HT Avail request error: %d\n", err);
691 return -EBADE;
692 }
693
694 /* Check current status */
695 clkctl = brcmf_sdio_regrb(bus->sdiodev,
696 SBSDIO_FUNC1_CHIPCLKCSR, &err);
697 if (err) {
698 brcmf_err("HT Avail read error: %d\n", err);
699 return -EBADE;
700 }
701
702 /* Go to pending and await interrupt if appropriate */
703 if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
704 /* Allow only clock-available interrupt */
705 devctl = brcmf_sdio_regrb(bus->sdiodev,
706 SBSDIO_DEVICE_CTL, &err);
707 if (err) {
708 brcmf_err("Devctl error setting CA: %d\n",
709 err);
710 return -EBADE;
711 }
712
713 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
714 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
715 devctl, &err);
716 brcmf_dbg(INFO, "CLKCTL: set PENDING\n");
717 bus->clkstate = CLK_PENDING;
718
719 return 0;
720 } else if (bus->clkstate == CLK_PENDING) {
721 /* Cancel CA-only interrupt filter */
722 devctl = brcmf_sdio_regrb(bus->sdiodev,
723 SBSDIO_DEVICE_CTL, &err);
724 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
725 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
726 devctl, &err);
727 }
728
729 /* Otherwise, wait here (polling) for HT Avail */
730 timeout = jiffies +
731 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
732 while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
733 clkctl = brcmf_sdio_regrb(bus->sdiodev,
734 SBSDIO_FUNC1_CHIPCLKCSR,
735 &err);
736 if (time_after(jiffies, timeout))
737 break;
738 else
739 usleep_range(5000, 10000);
740 }
741 if (err) {
742 brcmf_err("HT Avail request error: %d\n", err);
743 return -EBADE;
744 }
745 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
746 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
747 PMU_MAX_TRANSITION_DLY, clkctl);
748 return -EBADE;
749 }
750
751 /* Mark clock available */
752 bus->clkstate = CLK_AVAIL;
753 brcmf_dbg(INFO, "CLKCTL: turned ON\n");
754
755 #if defined(DEBUG)
756 if (!bus->alp_only) {
757 if (SBSDIO_ALPONLY(clkctl))
758 brcmf_err("HT Clock should be on\n");
759 }
760 #endif /* defined (DEBUG) */
761
762 bus->activity = true;
763 } else {
764 clkreq = 0;
765
766 if (bus->clkstate == CLK_PENDING) {
767 /* Cancel CA-only interrupt filter */
768 devctl = brcmf_sdio_regrb(bus->sdiodev,
769 SBSDIO_DEVICE_CTL, &err);
770 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
771 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
772 devctl, &err);
773 }
774
775 bus->clkstate = CLK_SDONLY;
776 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
777 clkreq, &err);
778 brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
779 if (err) {
780 brcmf_err("Failed access turning clock off: %d\n",
781 err);
782 return -EBADE;
783 }
784 }
785 return 0;
786 }
787
788 /* Change idle/active SD state */
789 static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
790 {
791 brcmf_dbg(TRACE, "Enter\n");
792
793 if (on)
794 bus->clkstate = CLK_SDONLY;
795 else
796 bus->clkstate = CLK_NONE;
797
798 return 0;
799 }
800
801 /* Transition SD and backplane clock readiness */
802 static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
803 {
804 #ifdef DEBUG
805 uint oldstate = bus->clkstate;
806 #endif /* DEBUG */
807
808 brcmf_dbg(TRACE, "Enter\n");
809
810 /* Early exit if we're already there */
811 if (bus->clkstate == target) {
812 if (target == CLK_AVAIL) {
813 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
814 bus->activity = true;
815 }
816 return 0;
817 }
818
819 switch (target) {
820 case CLK_AVAIL:
821 /* Make sure SD clock is available */
822 if (bus->clkstate == CLK_NONE)
823 brcmf_sdbrcm_sdclk(bus, true);
824 /* Now request HT Avail on the backplane */
825 brcmf_sdbrcm_htclk(bus, true, pendok);
826 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
827 bus->activity = true;
828 break;
829
830 case CLK_SDONLY:
831 /* Remove HT request, or bring up SD clock */
832 if (bus->clkstate == CLK_NONE)
833 brcmf_sdbrcm_sdclk(bus, true);
834 else if (bus->clkstate == CLK_AVAIL)
835 brcmf_sdbrcm_htclk(bus, false, false);
836 else
837 brcmf_err("request for %d -> %d\n",
838 bus->clkstate, target);
839 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
840 break;
841
842 case CLK_NONE:
843 /* Make sure to remove HT request */
844 if (bus->clkstate == CLK_AVAIL)
845 brcmf_sdbrcm_htclk(bus, false, false);
846 /* Now remove the SD clock */
847 brcmf_sdbrcm_sdclk(bus, false);
848 brcmf_sdbrcm_wd_timer(bus, 0);
849 break;
850 }
851 #ifdef DEBUG
852 brcmf_dbg(INFO, "%d -> %d\n", oldstate, bus->clkstate);
853 #endif /* DEBUG */
854
855 return 0;
856 }
857
858 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
859 {
860 u32 intstatus = 0;
861 u32 hmb_data;
862 u8 fcbits;
863 int ret;
864
865 brcmf_dbg(TRACE, "Enter\n");
866
867 /* Read mailbox data and ack that we did so */
868 ret = r_sdreg32(bus, &hmb_data,
869 offsetof(struct sdpcmd_regs, tohostmailboxdata));
870
871 if (ret == 0)
872 w_sdreg32(bus, SMB_INT_ACK,
873 offsetof(struct sdpcmd_regs, tosbmailbox));
874 bus->sdcnt.f1regdata += 2;
875
876 /* Dongle recomposed rx frames, accept them again */
877 if (hmb_data & HMB_DATA_NAKHANDLED) {
878 brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n",
879 bus->rx_seq);
880 if (!bus->rxskip)
881 brcmf_err("unexpected NAKHANDLED!\n");
882
883 bus->rxskip = false;
884 intstatus |= I_HMB_FRAME_IND;
885 }
886
887 /*
888 * DEVREADY does not occur with gSPI.
889 */
890 if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
891 bus->sdpcm_ver =
892 (hmb_data & HMB_DATA_VERSION_MASK) >>
893 HMB_DATA_VERSION_SHIFT;
894 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
895 brcmf_err("Version mismatch, dongle reports %d, "
896 "expecting %d\n",
897 bus->sdpcm_ver, SDPCM_PROT_VERSION);
898 else
899 brcmf_dbg(INFO, "Dongle ready, protocol version %d\n",
900 bus->sdpcm_ver);
901 }
902
903 /*
904 * Flow control has been moved into the RX headers, so this out-of-band
905 * method isn't used any more; it is handled here only to remain
906 * backward compatible with older dongles.
907 */
908 if (hmb_data & HMB_DATA_FC) {
909 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
910 HMB_DATA_FCDATA_SHIFT;
911
912 if (fcbits & ~bus->flowcontrol)
913 bus->sdcnt.fc_xoff++;
914
915 if (bus->flowcontrol & ~fcbits)
916 bus->sdcnt.fc_xon++;
917
918 bus->sdcnt.fc_rcvd++;
919 bus->flowcontrol = fcbits;
920 }
921
922 /* Shouldn't be any others */
923 if (hmb_data & ~(HMB_DATA_DEVREADY |
924 HMB_DATA_NAKHANDLED |
925 HMB_DATA_FC |
926 HMB_DATA_FWREADY |
927 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
928 brcmf_err("Unknown mailbox data content: 0x%02x\n",
929 hmb_data);
930
931 return intstatus;
932 }
933
934 static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
935 {
936 uint retries = 0;
937 u16 lastrbc;
938 u8 hi, lo;
939 int err;
940
941 brcmf_err("%sterminate frame%s\n",
942 abort ? "abort command, " : "",
943 rtx ? ", send NAK" : "");
944
945 if (abort)
946 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
947
948 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
949 SFC_RF_TERM, &err);
950 bus->sdcnt.f1regdata++;
951
952 /* Wait until the packet has been flushed (device/FIFO stable) */
953 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
954 hi = brcmf_sdio_regrb(bus->sdiodev,
955 SBSDIO_FUNC1_RFRAMEBCHI, &err);
956 lo = brcmf_sdio_regrb(bus->sdiodev,
957 SBSDIO_FUNC1_RFRAMEBCLO, &err);
958 bus->sdcnt.f1regdata += 2;
959
960 if ((hi == 0) && (lo == 0))
961 break;
962
963 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
964 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
965 lastrbc, (hi << 8) + lo);
966 }
967 lastrbc = (hi << 8) + lo;
968 }
969
970 if (!retries)
971 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
972 else
973 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
974
975 if (rtx) {
976 bus->sdcnt.rxrtx++;
977 err = w_sdreg32(bus, SMB_NAK,
978 offsetof(struct sdpcmd_regs, tosbmailbox));
979
980 bus->sdcnt.f1regdata++;
981 if (err == 0)
982 bus->rxskip = true;
983 }
984
985 /* Clear partial in any case */
986 bus->cur_read.len = 0;
987
988 /* If we can't reach the device, signal failure */
989 if (err)
990 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
991 }
992
993 /* copy a buffer into a pkt buffer chain */
994 static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
995 {
996 uint n, ret = 0;
997 struct sk_buff *p;
998 u8 *buf;
999
1000 buf = bus->dataptr;
1001
1002 /* copy the data */
1003 skb_queue_walk(&bus->glom, p) {
1004 n = min_t(uint, p->len, len);
1005 memcpy(p->data, buf, n);
1006 buf += n;
1007 len -= n;
1008 ret += n;
1009 if (!len)
1010 break;
1011 }
1012
1013 return ret;
1014 }
1015
1016 /* return total length of buffer chain */
1017 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1018 {
1019 struct sk_buff *p;
1020 uint total;
1021
1022 total = 0;
1023 skb_queue_walk(&bus->glom, p)
1024 total += p->len;
1025 return total;
1026 }
1027
1028 static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1029 {
1030 struct sk_buff *cur, *next;
1031
1032 skb_queue_walk_safe(&bus->glom, cur, next) {
1033 skb_unlink(cur, &bus->glom);
1034 brcmu_pkt_buf_free_skb(cur);
1035 }
1036 }
1037
1038 static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1039 struct brcmf_sdio_read *rd,
1040 enum brcmf_sdio_frmtype type)
1041 {
1042 u16 len, checksum;
1043 u8 rx_seq, fc, tx_seq_max;
1044
1045 /*
1046 * 4 bytes hardware header (frame tag)
1047 * Byte 0~1: Frame length
1048 * Byte 2~3: Checksum, bit-wise inverse of frame length
1049 */
1050 len = get_unaligned_le16(header);
1051 checksum = get_unaligned_le16(header + sizeof(u16));
1052 /* All zero means no more to read */
1053 if (!(len | checksum)) {
1054 bus->rxpending = false;
1055 return -ENODATA;
1056 }
1057 if ((u16)(~(len ^ checksum))) {
1058 brcmf_err("HW header checksum error\n");
1059 bus->sdcnt.rx_badhdr++;
1060 brcmf_sdbrcm_rxfail(bus, false, false);
1061 return -EIO;
1062 }
1063 if (len < SDPCM_HDRLEN) {
1064 brcmf_err("HW header length error\n");
1065 return -EPROTO;
1066 }
1067 if (type == BRCMF_SDIO_FT_SUPER &&
1068 (roundup(len, bus->blocksize) != rd->len)) {
1069 brcmf_err("HW superframe header length error\n");
1070 return -EPROTO;
1071 }
1072 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1073 brcmf_err("HW subframe header length error\n");
1074 return -EPROTO;
1075 }
1076 rd->len = len;
1077
1078 /*
1079 * 8-byte software header
1080 * Byte 0: Rx sequence number
1081 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1082 * Byte 2: Length of next data frame
1083 * Byte 3: Data offset
1084 * Byte 4: Flow control bits
1085 * Byte 5: Maximum sequence number allowed for Tx
1086 * Byte 6~7: Reserved
1087 */
1088 if (type == BRCMF_SDIO_FT_SUPER &&
1089 SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
1090 brcmf_err("Glom descriptor found in superframe head\n");
1091 rd->len = 0;
1092 return -EINVAL;
1093 }
1094 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1095 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1096 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1097 type != BRCMF_SDIO_FT_SUPER) {
1098 brcmf_err("HW header length too long\n");
1099 bus->sdcnt.rx_toolong++;
1100 brcmf_sdbrcm_rxfail(bus, false, false);
1101 rd->len = 0;
1102 return -EPROTO;
1103 }
1104 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1105 brcmf_err("Wrong channel for superframe\n");
1106 rd->len = 0;
1107 return -EINVAL;
1108 }
1109 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1110 rd->channel != SDPCM_EVENT_CHANNEL) {
1111 brcmf_err("Wrong channel for subframe\n");
1112 rd->len = 0;
1113 return -EINVAL;
1114 }
1115 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1116 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1117 brcmf_err("seq %d: bad data offset\n", rx_seq);
1118 bus->sdcnt.rx_badhdr++;
1119 brcmf_sdbrcm_rxfail(bus, false, false);
1120 rd->len = 0;
1121 return -ENXIO;
1122 }
1123 if (rd->seq_num != rx_seq) {
1124 brcmf_err("seq %d: sequence number error, expect %d\n",
1125 rx_seq, rd->seq_num);
1126 bus->sdcnt.rx_badseq++;
1127 rd->seq_num = rx_seq;
1128 }
1129 /* no need to check the remaining fields for a subframe */
1130 if (type == BRCMF_SDIO_FT_SUB)
1131 return 0;
1132 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1133 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1134 /* only warn for non-glom packets */
1135 if (rd->channel != SDPCM_GLOM_CHANNEL)
1136 brcmf_err("seq %d: next length error\n", rx_seq);
1137 rd->len_nxtfrm = 0;
1138 }
1139 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1140 if (bus->flowcontrol != fc) {
1141 if (~bus->flowcontrol & fc)
1142 bus->sdcnt.fc_xoff++;
1143 if (bus->flowcontrol & ~fc)
1144 bus->sdcnt.fc_xon++;
1145 bus->sdcnt.fc_rcvd++;
1146 bus->flowcontrol = fc;
1147 }
1148 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1149 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1150 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1151 tx_seq_max = bus->tx_seq + 2;
1152 }
1153 bus->tx_max = tx_seq_max;
1154
1155 return 0;
1156 }
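/*
 * Illustrative sketch, not part of the driver: the 4-byte hardware tag
 * validated at the top of brcmf_sdio_hdparser() is the little-endian
 * frame length followed by its bit-wise inverse. Building and re-checking
 * one looks like this; for len == 0x0010 the check value is 0xffef and
 * ~(len ^ check) is 0, so the tag passes.
 */
static void example_put_hw_tag(u8 *header, u16 len)
{
	put_unaligned_le16(len, header);
	put_unaligned_le16(~len, header + sizeof(u16));
}

static bool example_hw_tag_ok(u8 *header)
{
	u16 len = get_unaligned_le16(header);
	u16 check = get_unaligned_le16(header + sizeof(u16));

	return (u16)(~(len ^ check)) == 0;
}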
1157
1158 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1159 {
1160 u16 dlen, totlen;
1161 u8 *dptr, num = 0;
1162
1163 u16 sublen;
1164 struct sk_buff *pfirst, *pnext;
1165
1166 int errcode;
1167 u8 doff, sfdoff;
1168
1169 bool usechain = bus->use_rxchain;
1170
1171 struct brcmf_sdio_read rd_new;
1172
1173 /* If packets, issue read(s) and send up packet chain */
1174 /* Return sequence numbers consumed? */
1175
1176 brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
1177 bus->glomd, skb_peek(&bus->glom));
1178
1179 /* If there's a descriptor, generate the packet chain */
1180 if (bus->glomd) {
1181 pfirst = pnext = NULL;
1182 dlen = (u16) (bus->glomd->len);
1183 dptr = bus->glomd->data;
1184 if (!dlen || (dlen & 1)) {
1185 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1186 dlen);
1187 dlen = 0;
1188 }
1189
1190 for (totlen = num = 0; dlen; num++) {
1191 /* Get (and move past) next length */
1192 sublen = get_unaligned_le16(dptr);
1193 dlen -= sizeof(u16);
1194 dptr += sizeof(u16);
1195 if ((sublen < SDPCM_HDRLEN) ||
1196 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1197 brcmf_err("descriptor len %d bad: %d\n",
1198 num, sublen);
1199 pnext = NULL;
1200 break;
1201 }
1202 if (sublen % BRCMF_SDALIGN) {
1203 brcmf_err("sublen %d not multiple of %d\n",
1204 sublen, BRCMF_SDALIGN);
1205 usechain = false;
1206 }
1207 totlen += sublen;
1208
1209 /* For last frame, adjust read len so total
1210 is a block multiple */
1211 if (!dlen) {
1212 sublen +=
1213 (roundup(totlen, bus->blocksize) - totlen);
1214 totlen = roundup(totlen, bus->blocksize);
1215 }
1216
1217 /* Allocate/chain packet for next subframe */
1218 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
1219 if (pnext == NULL) {
1220 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1221 num, sublen);
1222 break;
1223 }
1224 skb_queue_tail(&bus->glom, pnext);
1225
1226 /* Adhere to start alignment requirements */
1227 pkt_align(pnext, sublen, BRCMF_SDALIGN);
1228 }
1229
1230 /* If all allocations succeeded, save packet chain
1231 in bus structure */
1232 if (pnext) {
1233 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1234 totlen, num);
1235 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1236 totlen != bus->cur_read.len) {
1237 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1238 bus->cur_read.len, totlen, rxseq);
1239 }
1240 pfirst = pnext = NULL;
1241 } else {
1242 brcmf_sdbrcm_free_glom(bus);
1243 num = 0;
1244 }
1245
1246 /* Done with descriptor packet */
1247 brcmu_pkt_buf_free_skb(bus->glomd);
1248 bus->glomd = NULL;
1249 bus->cur_read.len = 0;
1250 }
1251
1252 /* Ok -- either we just generated a packet chain,
1253 or had one from before */
1254 if (!skb_queue_empty(&bus->glom)) {
1255 if (BRCMF_GLOM_ON()) {
1256 brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1257 skb_queue_walk(&bus->glom, pnext) {
1258 brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
1259 pnext, (u8 *) (pnext->data),
1260 pnext->len, pnext->len);
1261 }
1262 }
1263
1264 pfirst = skb_peek(&bus->glom);
1265 dlen = (u16) brcmf_sdbrcm_glom_len(bus);
1266
1267 /* Do an SDIO read for the superframe. Configurable iovar to
1268 * read directly into the chained packet, or allocate a large
1269 * packet and copy into the chain.
1270 */
1271 sdio_claim_host(bus->sdiodev->func[1]);
1272 if (usechain) {
1273 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1274 bus->sdiodev->sbwad,
1275 SDIO_FUNC_2, F2SYNC, &bus->glom);
1276 } else if (bus->dataptr) {
1277 errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
1278 bus->sdiodev->sbwad,
1279 SDIO_FUNC_2, F2SYNC,
1280 bus->dataptr, dlen);
1281 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
1282 if (sublen != dlen) {
1283 brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
1284 dlen, sublen);
1285 errcode = -1;
1286 }
1287 pnext = NULL;
1288 } else {
1289 brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
1290 dlen);
1291 errcode = -1;
1292 }
1293 sdio_release_host(bus->sdiodev->func[1]);
1294 bus->sdcnt.f2rxdata++;
1295
1296 /* On failure, kill the superframe, allow a couple retries */
1297 if (errcode < 0) {
1298 brcmf_err("glom read of %d bytes failed: %d\n",
1299 dlen, errcode);
1300
1301 sdio_claim_host(bus->sdiodev->func[1]);
1302 if (bus->glomerr++ < 3) {
1303 brcmf_sdbrcm_rxfail(bus, true, true);
1304 } else {
1305 bus->glomerr = 0;
1306 brcmf_sdbrcm_rxfail(bus, true, false);
1307 bus->sdcnt.rxglomfail++;
1308 brcmf_sdbrcm_free_glom(bus);
1309 }
1310 sdio_release_host(bus->sdiodev->func[1]);
1311 return 0;
1312 }
1313
1314 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1315 pfirst->data, min_t(int, pfirst->len, 48),
1316 "SUPERFRAME:\n");
1317
1318 rd_new.seq_num = rxseq;
1319 rd_new.len = dlen;
1320 sdio_claim_host(bus->sdiodev->func[1]);
1321 errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
1322 BRCMF_SDIO_FT_SUPER);
1323 sdio_release_host(bus->sdiodev->func[1]);
1324 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1325
1326 /* Remove superframe header, remember offset */
1327 skb_pull(pfirst, rd_new.dat_offset);
1328 sfdoff = rd_new.dat_offset;
1329 num = 0;
1330
1331 /* Validate all the subframe headers */
1332 skb_queue_walk(&bus->glom, pnext) {
1333 /* leave when invalid subframe is found */
1334 if (errcode)
1335 break;
1336
1337 rd_new.len = pnext->len;
1338 rd_new.seq_num = rxseq++;
1339 sdio_claim_host(bus->sdiodev->func[1]);
1340 errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
1341 BRCMF_SDIO_FT_SUB);
1342 sdio_release_host(bus->sdiodev->func[1]);
1343 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1344 pnext->data, 32, "subframe:\n");
1345
1346 num++;
1347 }
1348
1349 if (errcode) {
1350 /* Terminate frame on error, request
1351 a couple retries */
1352 sdio_claim_host(bus->sdiodev->func[1]);
1353 if (bus->glomerr++ < 3) {
1354 /* Restore superframe header space */
1355 skb_push(pfirst, sfdoff);
1356 brcmf_sdbrcm_rxfail(bus, true, true);
1357 } else {
1358 bus->glomerr = 0;
1359 brcmf_sdbrcm_rxfail(bus, true, false);
1360 bus->sdcnt.rxglomfail++;
1361 brcmf_sdbrcm_free_glom(bus);
1362 }
1363 sdio_release_host(bus->sdiodev->func[1]);
1364 bus->cur_read.len = 0;
1365 return 0;
1366 }
1367
1368 /* Basic SD framing looks ok - process each packet (header) */
1369
1370 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1371 dptr = (u8 *) (pfirst->data);
1372 sublen = get_unaligned_le16(dptr);
1373 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1374
1375 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1376 dptr, pfirst->len,
1377 "Rx Subframe Data:\n");
1378
1379 __skb_trim(pfirst, sublen);
1380 skb_pull(pfirst, doff);
1381
1382 if (pfirst->len == 0) {
1383 skb_unlink(pfirst, &bus->glom);
1384 brcmu_pkt_buf_free_skb(pfirst);
1385 continue;
1386 }
1387
1388 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1389 pfirst->data,
1390 min_t(int, pfirst->len, 32),
1391 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1392 bus->glom.qlen, pfirst, pfirst->data,
1393 pfirst->len, pfirst->next,
1394 pfirst->prev);
1395 }
1396 /* send any remaining packets up */
1397 if (bus->glom.qlen)
1398 brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
1399
1400 bus->sdcnt.rxglomframes++;
1401 bus->sdcnt.rxglompkts += bus->glom.qlen;
1402 }
1403 return num;
1404 }
1405
1406 static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1407 bool *pending)
1408 {
1409 DECLARE_WAITQUEUE(wait, current);
1410 int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1411
1412 /* Wait until control frame is available */
1413 add_wait_queue(&bus->dcmd_resp_wait, &wait);
1414 set_current_state(TASK_INTERRUPTIBLE);
1415
1416 while (!(*condition) && (!signal_pending(current) && timeout))
1417 timeout = schedule_timeout(timeout);
1418
1419 if (signal_pending(current))
1420 *pending = true;
1421
1422 set_current_state(TASK_RUNNING);
1423 remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1424
1425 return timeout;
1426 }
1427
1428 static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1429 {
1430 if (waitqueue_active(&bus->dcmd_resp_wait))
1431 wake_up_interruptible(&bus->dcmd_resp_wait);
1432
1433 return 0;
1434 }
1435 static void
1436 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1437 {
1438 uint rdlen, pad;
1439 u8 *buf = NULL, *rbuf;
1440 int sdret;
1441
1442 brcmf_dbg(TRACE, "Enter\n");
1443
1444 if (bus->rxblen)
1445 buf = vzalloc(bus->rxblen);
1446 if (!buf)
1447 goto done;
1448
1449 rbuf = bus->rxbuf;
1450 pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
1451 if (pad)
1452 rbuf += (BRCMF_SDALIGN - pad);
1453
1454 /* Copy the already-read portion over */
1455 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1456 if (len <= BRCMF_FIRSTREAD)
1457 goto gotpkt;
1458
1459 /* Raise rdlen to next SDIO block to avoid tail command */
1460 rdlen = len - BRCMF_FIRSTREAD;
1461 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1462 pad = bus->blocksize - (rdlen % bus->blocksize);
1463 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1464 ((len + pad) < bus->sdiodev->bus_if->maxctl))
1465 rdlen += pad;
1466 } else if (rdlen % BRCMF_SDALIGN) {
1467 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1468 }
1469
1470 /* Satisfy length-alignment requirements */
1471 if (rdlen & (ALIGNMENT - 1))
1472 rdlen = roundup(rdlen, ALIGNMENT);
1473
1474 /* Drop if the read is too big or it exceeds our maximum */
1475 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1476 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1477 rdlen, bus->sdiodev->bus_if->maxctl);
1478 brcmf_sdbrcm_rxfail(bus, false, false);
1479 goto done;
1480 }
1481
1482 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1483 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1484 len, len - doff, bus->sdiodev->bus_if->maxctl);
1485 bus->sdcnt.rx_toolong++;
1486 brcmf_sdbrcm_rxfail(bus, false, false);
1487 goto done;
1488 }
1489
1490 /* Read remainder of frame body */
1491 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1492 bus->sdiodev->sbwad,
1493 SDIO_FUNC_2,
1494 F2SYNC, rbuf, rdlen);
1495 bus->sdcnt.f2rxdata++;
1496
1497 /* Control frame failures need retransmission */
1498 if (sdret < 0) {
1499 brcmf_err("read %d control bytes failed: %d\n",
1500 rdlen, sdret);
1501 bus->sdcnt.rxc_errors++;
1502 brcmf_sdbrcm_rxfail(bus, true, true);
1503 goto done;
1504 } else
1505 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1506
1507 gotpkt:
1508
1509 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1510 buf, len, "RxCtrl:\n");
1511
1512 /* Point to valid data and indicate its length */
1513 spin_lock_bh(&bus->rxctl_lock);
1514 if (bus->rxctl) {
1515 brcmf_err("last control frame is being processed.\n");
1516 spin_unlock_bh(&bus->rxctl_lock);
1517 vfree(buf);
1518 goto done;
1519 }
1520 bus->rxctl = buf + doff;
1521 bus->rxctl_orig = buf;
1522 bus->rxlen = len - doff;
1523 spin_unlock_bh(&bus->rxctl_lock);
1524
1525 done:
1526 /* Wake any waiters */
1527 brcmf_sdbrcm_dcmd_resp_wake(bus);
1528 }
1529
1530 /* Pad read to blocksize for efficiency */
1531 static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1532 {
1533 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1534 *pad = bus->blocksize - (*rdlen % bus->blocksize);
1535 if (*pad <= bus->roundup && *pad < bus->blocksize &&
1536 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1537 *rdlen += *pad;
1538 } else if (*rdlen % BRCMF_SDALIGN) {
1539 *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
1540 }
1541 }
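/*
 * Worked example with illustrative values: for blocksize == 64,
 * roundup == 512 and *rdlen == 100, *pad becomes 64 - (100 % 64) == 28
 * and *rdlen is raised to 128, so the read ends exactly on an SDIO block
 * boundary and no separate tail transfer is needed. Reads no larger than
 * a block are instead only padded up to a multiple of BRCMF_SDALIGN.
 */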
1542
1543 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1544 {
1545 struct sk_buff *pkt; /* Packet for event or data frames */
1546 struct sk_buff_head pktlist; /* needed for bus interface */
1547 u16 pad; /* Number of pad bytes to read */
1548 uint rxleft = 0; /* Remaining number of frames allowed */
1549 int sdret; /* Return code from calls */
1550 uint rxcount = 0; /* Total frames read */
1551 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1552 u8 head_read = 0;
1553
1554 brcmf_dbg(TRACE, "Enter\n");
1555
1556 /* Not finished unless we encounter the no-more-frames indication */
1557 bus->rxpending = true;
1558
1559 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1560 !bus->rxskip && rxleft &&
1561 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1562 rd->seq_num++, rxleft--) {
1563
1564 /* Handle glomming separately */
1565 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1566 u8 cnt;
1567 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1568 bus->glomd, skb_peek(&bus->glom));
1569 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1570 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1571 rd->seq_num += cnt - 1;
1572 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1573 continue;
1574 }
1575
1576 rd->len_left = rd->len;
1577 /* read header first when the frame length is unknown */
1578 sdio_claim_host(bus->sdiodev->func[1]);
1579 if (!rd->len) {
1580 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1581 bus->sdiodev->sbwad,
1582 SDIO_FUNC_2, F2SYNC,
1583 bus->rxhdr,
1584 BRCMF_FIRSTREAD);
1585 bus->sdcnt.f2rxhdrs++;
1586 if (sdret < 0) {
1587 brcmf_err("RXHEADER FAILED: %d\n",
1588 sdret);
1589 bus->sdcnt.rx_hdrfail++;
1590 brcmf_sdbrcm_rxfail(bus, true, true);
1591 sdio_release_host(bus->sdiodev->func[1]);
1592 continue;
1593 }
1594
1595 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1596 bus->rxhdr, SDPCM_HDRLEN,
1597 "RxHdr:\n");
1598
1599 if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
1600 BRCMF_SDIO_FT_NORMAL)) {
1601 sdio_release_host(bus->sdiodev->func[1]);
1602 if (!bus->rxpending)
1603 break;
1604 else
1605 continue;
1606 }
1607
1608 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1609 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1610 rd->len,
1611 rd->dat_offset);
1612 /* prepare the descriptor for the next read */
1613 rd->len = rd->len_nxtfrm << 4;
1614 rd->len_nxtfrm = 0;
1615 /* treat all packets as events if we don't know */
1616 rd->channel = SDPCM_EVENT_CHANNEL;
1617 sdio_release_host(bus->sdiodev->func[1]);
1618 continue;
1619 }
1620 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1621 rd->len - BRCMF_FIRSTREAD : 0;
1622 head_read = BRCMF_FIRSTREAD;
1623 }
1624
1625 brcmf_pad(bus, &pad, &rd->len_left);
1626
1627 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1628 BRCMF_SDALIGN);
1629 if (!pkt) {
1630 /* Give up on data, request rtx of events */
1631 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1632 brcmf_sdbrcm_rxfail(bus, false,
1633 RETRYCHAN(rd->channel));
1634 sdio_release_host(bus->sdiodev->func[1]);
1635 continue;
1636 }
1637 skb_pull(pkt, head_read);
1638 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1639
1640 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1641 SDIO_FUNC_2, F2SYNC, pkt);
1642 bus->sdcnt.f2rxdata++;
1643 sdio_release_host(bus->sdiodev->func[1]);
1644
1645 if (sdret < 0) {
1646 brcmf_err("read %d bytes from channel %d failed: %d\n",
1647 rd->len, rd->channel, sdret);
1648 brcmu_pkt_buf_free_skb(pkt);
1649 sdio_claim_host(bus->sdiodev->func[1]);
1650 brcmf_sdbrcm_rxfail(bus, true,
1651 RETRYCHAN(rd->channel));
1652 sdio_release_host(bus->sdiodev->func[1]);
1653 continue;
1654 }
1655
1656 if (head_read) {
1657 skb_push(pkt, head_read);
1658 memcpy(pkt->data, bus->rxhdr, head_read);
1659 head_read = 0;
1660 } else {
1661 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1662 rd_new.seq_num = rd->seq_num;
1663 sdio_claim_host(bus->sdiodev->func[1]);
1664 if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
1665 BRCMF_SDIO_FT_NORMAL)) {
1666 rd->len = 0;
1667 brcmu_pkt_buf_free_skb(pkt);
1668 }
1669 bus->sdcnt.rx_readahead_cnt++;
1670 if (rd->len != roundup(rd_new.len, 16)) {
1671 brcmf_err("frame length mismatch:read %d, should be %d\n",
1672 rd->len,
1673 roundup(rd_new.len, 16) >> 4);
1674 rd->len = 0;
1675 brcmf_sdbrcm_rxfail(bus, true, true);
1676 sdio_release_host(bus->sdiodev->func[1]);
1677 brcmu_pkt_buf_free_skb(pkt);
1678 continue;
1679 }
1680 sdio_release_host(bus->sdiodev->func[1]);
1681 rd->len_nxtfrm = rd_new.len_nxtfrm;
1682 rd->channel = rd_new.channel;
1683 rd->dat_offset = rd_new.dat_offset;
1684
1685 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1686 BRCMF_DATA_ON()) &&
1687 BRCMF_HDRS_ON(),
1688 bus->rxhdr, SDPCM_HDRLEN,
1689 "RxHdr:\n");
1690
1691 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1692 brcmf_err("readahead on control packet %d?\n",
1693 rd_new.seq_num);
1694 /* Force retry w/normal header read */
1695 rd->len = 0;
1696 sdio_claim_host(bus->sdiodev->func[1]);
1697 brcmf_sdbrcm_rxfail(bus, false, true);
1698 sdio_release_host(bus->sdiodev->func[1]);
1699 brcmu_pkt_buf_free_skb(pkt);
1700 continue;
1701 }
1702 }
1703
1704 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1705 pkt->data, rd->len, "Rx Data:\n");
1706
1707 /* Save superframe descriptor and allocate packet frame */
1708 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1709 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
1710 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1711 rd->len);
1712 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1713 pkt->data, rd->len,
1714 "Glom Data:\n");
1715 __skb_trim(pkt, rd->len);
1716 skb_pull(pkt, SDPCM_HDRLEN);
1717 bus->glomd = pkt;
1718 } else {
1719 brcmf_err("%s: glom superframe w/o "
1720 "descriptor!\n", __func__);
1721 sdio_claim_host(bus->sdiodev->func[1]);
1722 brcmf_sdbrcm_rxfail(bus, false, false);
1723 sdio_release_host(bus->sdiodev->func[1]);
1724 }
1725 /* prepare the descriptor for the next read */
1726 rd->len = rd->len_nxtfrm << 4;
1727 rd->len_nxtfrm = 0;
1728 /* treat all packets as events if we don't know */
1729 rd->channel = SDPCM_EVENT_CHANNEL;
1730 continue;
1731 }
1732
1733 /* Fill in packet len and prio, deliver upward */
1734 __skb_trim(pkt, rd->len);
1735 skb_pull(pkt, rd->dat_offset);
1736
1737 /* prepare the descriptor for the next read */
1738 rd->len = rd->len_nxtfrm << 4;
1739 rd->len_nxtfrm = 0;
1740 /* treat all packets as events if we don't know */
1741 rd->channel = SDPCM_EVENT_CHANNEL;
1742
1743 if (pkt->len == 0) {
1744 brcmu_pkt_buf_free_skb(pkt);
1745 continue;
1746 }
1747
1748 skb_queue_head_init(&pktlist);
1749 skb_queue_tail(&pktlist, pkt);
1750 brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
1751 }
1752
1753 rxcount = maxframes - rxleft;
1754 /* Message if we hit the limit */
1755 if (!rxleft)
1756 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
1757 else
1758 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
1759 /* Back off rxseq if awaiting rtx, update rx_seq */
1760 if (bus->rxskip)
1761 rd->seq_num--;
1762 bus->rx_seq = rd->seq_num;
1763
1764 return rxcount;
1765 }
1766
1767 static void
1768 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1769 {
1770 if (waitqueue_active(&bus->ctrl_wait))
1771 wake_up_interruptible(&bus->ctrl_wait);
1772 return;
1773 }
1774
1775 /* Writes a HW/SW header into the packet and sends it. */
1776 /* Assumes: (a) header space already there, (b) caller holds lock */
1777 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1778 uint chan, bool free_pkt)
1779 {
1780 int ret;
1781 u8 *frame;
1782 u16 len, pad = 0;
1783 u32 swheader;
1784 struct sk_buff *new;
1785 int i;
1786
1787 brcmf_dbg(TRACE, "Enter\n");
1788
1789 frame = (u8 *) (pkt->data);
1790
1791 /* Add alignment padding, allocate new packet if needed */
1792 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1793 if (pad) {
1794 if (skb_headroom(pkt) < pad) {
1795 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
1796 skb_headroom(pkt), pad);
1797 bus->sdiodev->bus_if->tx_realloc++;
1798 new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
1799 if (!new) {
1800 brcmf_err("couldn't allocate new %d-byte packet\n",
1801 pkt->len + BRCMF_SDALIGN);
1802 ret = -ENOMEM;
1803 goto done;
1804 }
1805
1806 pkt_align(new, pkt->len, BRCMF_SDALIGN);
1807 memcpy(new->data, pkt->data, pkt->len);
1808 if (free_pkt)
1809 brcmu_pkt_buf_free_skb(pkt);
1810 /* free the pkt if canned one is not used */
1811 free_pkt = true;
1812 pkt = new;
1813 frame = (u8 *) (pkt->data);
1814 /* precondition: (frame % BRCMF_SDALIGN) == 0) */
1815 pad = 0;
1816 } else {
1817 skb_push(pkt, pad);
1818 frame = (u8 *) (pkt->data);
1819 /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
1820 memset(frame, 0, pad + SDPCM_HDRLEN);
1821 }
1822 }
1823 /* precondition: pad < BRCMF_SDALIGN */
1824
1825 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
1826 len = (u16) (pkt->len);
1827 *(__le16 *) frame = cpu_to_le16(len);
1828 *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
1829
1830 /* Software tag: channel, sequence number, data offset */
1831 swheader =
1832 ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
1833 (((pad +
1834 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
1835
1836 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
1837 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
1838
1839 #ifdef DEBUG
1840 tx_packets[pkt->priority]++;
1841 #endif
1842
1843 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
1844 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1845 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
1846 frame, len, "Tx Frame:\n");
1847 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1848 ((BRCMF_CTL_ON() &&
1849 chan == SDPCM_CONTROL_CHANNEL) ||
1850 (BRCMF_DATA_ON() &&
1851 chan != SDPCM_CONTROL_CHANNEL))) &&
1852 BRCMF_HDRS_ON(),
1853 frame, min_t(u16, len, 16), "TxHdr:\n");
1854
1855 /* Raise len to next SDIO block to eliminate tail command */
1856 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
1857 u16 pad = bus->blocksize - (len % bus->blocksize);
1858 if ((pad <= bus->roundup) && (pad < bus->blocksize))
1859 len += pad;
1860 } else if (len % BRCMF_SDALIGN) {
1861 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
1862 }
1863
1864 /* Some controllers have trouble with odd bytes -- round to even */
1865 if (len & (ALIGNMENT - 1))
1866 len = roundup(len, ALIGNMENT);
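/* Example of the rounding above, assuming a 512-byte block size,
 * roundup of 512 and BRCMF_SDALIGN of 32: a 700-byte frame gets
 * 512 - (700 % 512) = 324 pad bytes and goes out as a single
 * 1024-byte, tail-command-free transfer; a 100-byte frame skips the
 * block rounding and is only padded up to the next 32-byte
 * boundary, 128.
 */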
1867
1868 sdio_claim_host(bus->sdiodev->func[1]);
1869 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1870 SDIO_FUNC_2, F2SYNC, pkt);
1871 bus->sdcnt.f2txdata++;
1872
1873 if (ret < 0) {
1874 /* On failure, abort the command and terminate the frame */
1875 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
1876 ret);
1877 bus->sdcnt.tx_sderrs++;
1878
1879 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
1880 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1881 SFC_WF_TERM, NULL);
1882 bus->sdcnt.f1regdata++;
1883
1884 for (i = 0; i < 3; i++) {
1885 u8 hi, lo;
1886 hi = brcmf_sdio_regrb(bus->sdiodev,
1887 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1888 lo = brcmf_sdio_regrb(bus->sdiodev,
1889 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1890 bus->sdcnt.f1regdata += 2;
1891 if ((hi == 0) && (lo == 0))
1892 break;
1893 }
1894
1895 }
1896 sdio_release_host(bus->sdiodev->func[1]);
1897 if (ret == 0)
1898 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
1899
1900 done:
1901 /* restore pkt buffer pointer before calling tx complete routine */
1902 skb_pull(pkt, SDPCM_HDRLEN + pad);
1903 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
1904
1905 if (free_pkt)
1906 brcmu_pkt_buf_free_skb(pkt);
1907
1908 return ret;
1909 }
1910
1911 static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1912 {
1913 struct sk_buff *pkt;
1914 u32 intstatus = 0;
1915 int ret = 0, prec_out;
1916 uint cnt = 0;
1917 uint datalen;
1918 u8 tx_prec_map;
1919
1920 brcmf_dbg(TRACE, "Enter\n");
1921
1922 tx_prec_map = ~bus->flowcontrol;
1923
1924 /* Send frames until the limit or some other event */
1925 for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
1926 spin_lock_bh(&bus->txqlock);
1927 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
1928 if (pkt == NULL) {
1929 spin_unlock_bh(&bus->txqlock);
1930 break;
1931 }
1932 spin_unlock_bh(&bus->txqlock);
1933 datalen = pkt->len - SDPCM_HDRLEN;
1934
1935 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
1936
1937 /* In poll mode, need to check for other events */
1938 if (!bus->intr && cnt) {
1939 /* Check device status, signal pending interrupt */
1940 sdio_claim_host(bus->sdiodev->func[1]);
1941 ret = r_sdreg32(bus, &intstatus,
1942 offsetof(struct sdpcmd_regs,
1943 intstatus));
1944 sdio_release_host(bus->sdiodev->func[1]);
1945 bus->sdcnt.f2txdata++;
1946 if (ret != 0)
1947 break;
1948 if (intstatus & bus->hostintmask)
1949 atomic_set(&bus->ipend, 1);
1950 }
1951 }
1952
1953 /* Deflow-control stack if needed */
1954 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
1955 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
1956 bus->txoff = false;
1957 brcmf_txflowblock(bus->sdiodev->dev, false);
1958 }
1959
1960 return cnt;
1961 }
1962
1963 static void brcmf_sdbrcm_bus_stop(struct device *dev)
1964 {
1965 u32 local_hostintmask;
1966 u8 saveclk;
1967 int err;
1968 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1969 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1970 struct brcmf_sdio *bus = sdiodev->bus;
1971
1972 brcmf_dbg(TRACE, "Enter\n");
1973
1974 if (bus->watchdog_tsk) {
1975 send_sig(SIGTERM, bus->watchdog_tsk, 1);
1976 kthread_stop(bus->watchdog_tsk);
1977 bus->watchdog_tsk = NULL;
1978 }
1979
1980 sdio_claim_host(bus->sdiodev->func[1]);
1981
1982 /* Enable clock for device interrupts */
1983 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
1984
1985 /* Disable and clear interrupts at the chip level also */
1986 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
1987 local_hostintmask = bus->hostintmask;
1988 bus->hostintmask = 0;
1989
1990 /* Change our idea of bus state */
1991 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1992
1993 /* Force clocks on backplane to be sure F2 interrupt propagates */
1994 saveclk = brcmf_sdio_regrb(bus->sdiodev,
1995 SBSDIO_FUNC1_CHIPCLKCSR, &err);
1996 if (!err) {
1997 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
1998 (saveclk | SBSDIO_FORCE_HT), &err);
1999 }
2000 if (err)
2001 brcmf_err("Failed to force clock for F2: err %d\n", err);
2002
2003 /* Turn off the bus (F2), free any pending packets */
2004 brcmf_dbg(INTR, "disable SDIO interrupts\n");
2005 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
2006 NULL);
2007
2008 /* Clear any pending interrupts now that F2 is disabled */
2009 w_sdreg32(bus, local_hostintmask,
2010 offsetof(struct sdpcmd_regs, intstatus));
2011
2012 /* Turn off the backplane clock (only) */
2013 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
2014 sdio_release_host(bus->sdiodev->func[1]);
2015
2016 /* Clear the data packet queues */
2017 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2018
2019 /* Clear any held glomming stuff */
2020 if (bus->glomd)
2021 brcmu_pkt_buf_free_skb(bus->glomd);
2022 brcmf_sdbrcm_free_glom(bus);
2023
2024 /* Clear rx control and wake any waiters */
2025 spin_lock_bh(&bus->rxctl_lock);
2026 bus->rxlen = 0;
2027 spin_unlock_bh(&bus->rxctl_lock);
2028 brcmf_sdbrcm_dcmd_resp_wake(bus);
2029
2030 /* Reset some F2 state stuff */
2031 bus->rxskip = false;
2032 bus->tx_seq = bus->rx_seq = 0;
2033 }
2034
2035 #ifdef CONFIG_BRCMFMAC_SDIO_OOB
2036 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2037 {
2038 unsigned long flags;
2039
2040 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2041 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2042 enable_irq(bus->sdiodev->irq);
2043 bus->sdiodev->irq_en = true;
2044 }
2045 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2046 }
2047 #else
2048 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2049 {
2050 }
2051 #endif /* CONFIG_BRCMFMAC_SDIO_OOB */
2052
2053 static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2054 {
2055 struct list_head *new_hd;
2056 unsigned long flags;
2057
2058 if (in_interrupt())
2059 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2060 else
2061 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2062 if (new_hd == NULL)
2063 return;
2064
2065 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2066 list_add_tail(new_hd, &bus->dpc_tsklst);
2067 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2068 }
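/* brcmf_sdbrcm_adddpctsk() queues one placeholder node on dpc_tsklst;
 * brcmf_sdio_dataworker() later walks that list and runs
 * brcmf_sdbrcm_dpc() once per node, so every ISR, poll or txdata
 * event that added a node gets a corresponding DPC pass.
 */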
2069
2070 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2071 {
2072 u8 idx;
2073 u32 addr;
2074 unsigned long val;
2075 int n, ret;
2076
2077 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2078 addr = bus->ci->c_inf[idx].base +
2079 offsetof(struct sdpcmd_regs, intstatus);
2080
2081 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2082 bus->sdcnt.f1regdata++;
2083 if (ret != 0)
2084 val = 0;
2085
2086 val &= bus->hostintmask;
2087 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2088
2089 /* Clear interrupts */
2090 if (val) {
2091 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2092 bus->sdcnt.f1regdata++;
2093 }
2094
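/* Any surviving bits are folded into the bus->intstatus bitmap below;
 * brcmf_sdbrcm_dpc() drains that bitmap with atomic_xchg() and keeps
 * still-pending events for its next scheduling pass.
 */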
2095 if (ret) {
2096 atomic_set(&bus->intstatus, 0);
2097 } else if (val) {
2098 for_each_set_bit(n, &val, 32)
2099 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2100 }
2101
2102 return ret;
2103 }
2104
2105 static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2106 {
2107 u32 newstatus = 0;
2108 unsigned long intstatus;
2109 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2110 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2111 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2112 int err = 0, n;
2113
2114 brcmf_dbg(TRACE, "Enter\n");
2115
2116 sdio_claim_host(bus->sdiodev->func[1]);
2117
2118 /* If waiting for HTAVAIL, check status */
2119 if (bus->clkstate == CLK_PENDING) {
2120 u8 clkctl, devctl = 0;
2121
2122 #ifdef DEBUG
2123 /* Check for inconsistent device control */
2124 devctl = brcmf_sdio_regrb(bus->sdiodev,
2125 SBSDIO_DEVICE_CTL, &err);
2126 if (err) {
2127 brcmf_err("error reading DEVCTL: %d\n", err);
2128 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2129 }
2130 #endif /* DEBUG */
2131
2132 /* Read CSR, if clock on switch to AVAIL, else ignore */
2133 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2134 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2135 if (err) {
2136 brcmf_err("error reading CSR: %d\n",
2137 err);
2138 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2139 }
2140
2141 brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2142 devctl, clkctl);
2143
2144 if (SBSDIO_HTAV(clkctl)) {
2145 devctl = brcmf_sdio_regrb(bus->sdiodev,
2146 SBSDIO_DEVICE_CTL, &err);
2147 if (err) {
2148 brcmf_err("error reading DEVCTL: %d\n",
2149 err);
2150 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2151 }
2152 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2153 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2154 devctl, &err);
2155 if (err) {
2156 brcmf_err("error writing DEVCTL: %d\n",
2157 err);
2158 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2159 }
2160 bus->clkstate = CLK_AVAIL;
2161 }
2162 }
2163
2164 /* Make sure backplane clock is on */
2165 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
2166
2167 /* Pending interrupt indicates new device status */
2168 if (atomic_read(&bus->ipend) > 0) {
2169 atomic_set(&bus->ipend, 0);
2170 err = brcmf_sdio_intr_rstatus(bus);
2171 }
2172
2173 /* Start with leftover status bits */
2174 intstatus = atomic_xchg(&bus->intstatus, 0);
2175
2176 /* Handle flow-control change: read new state in case our ack
2177 * crossed another change interrupt. If change still set, assume
2178 * FC ON for safety, let next loop through do the debounce.
2179 */
2180 if (intstatus & I_HMB_FC_CHANGE) {
2181 intstatus &= ~I_HMB_FC_CHANGE;
2182 err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2183 offsetof(struct sdpcmd_regs, intstatus));
2184
2185 err = r_sdreg32(bus, &newstatus,
2186 offsetof(struct sdpcmd_regs, intstatus));
2187 bus->sdcnt.f1regdata += 2;
2188 atomic_set(&bus->fcstate,
2189 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2190 intstatus |= (newstatus & bus->hostintmask);
2191 }
2192
2193 /* Handle host mailbox indication */
2194 if (intstatus & I_HMB_HOST_INT) {
2195 intstatus &= ~I_HMB_HOST_INT;
2196 intstatus |= brcmf_sdbrcm_hostmail(bus);
2197 }
2198
2199 sdio_release_host(bus->sdiodev->func[1]);
2200
2201 /* Generally don't ask for these, can get CRC errors... */
2202 if (intstatus & I_WR_OOSYNC) {
2203 brcmf_err("Dongle reports WR_OOSYNC\n");
2204 intstatus &= ~I_WR_OOSYNC;
2205 }
2206
2207 if (intstatus & I_RD_OOSYNC) {
2208 brcmf_err("Dongle reports RD_OOSYNC\n");
2209 intstatus &= ~I_RD_OOSYNC;
2210 }
2211
2212 if (intstatus & I_SBINT) {
2213 brcmf_err("Dongle reports SBINT\n");
2214 intstatus &= ~I_SBINT;
2215 }
2216
2217 /* Would be active due to wake-wlan in gSPI */
2218 if (intstatus & I_CHIPACTIVE) {
2219 brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2220 intstatus &= ~I_CHIPACTIVE;
2221 }
2222
2223 /* Ignore frame indications if rxskip is set */
2224 if (bus->rxskip)
2225 intstatus &= ~I_HMB_FRAME_IND;
2226
2227 /* On frame indication, read available frames */
2228 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2229 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2230 if (!bus->rxpending)
2231 intstatus &= ~I_HMB_FRAME_IND;
2232 rxlimit -= min(framecnt, rxlimit);
2233 }
2234
2235 /* Keep still-pending events for next scheduling */
2236 if (intstatus) {
2237 for_each_set_bit(n, &intstatus, 32)
2238 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2239 }
2240
2241 brcmf_sdbrcm_clrintr(bus);
2242
2243 if (data_ok(bus) && bus->ctrl_frame_stat &&
2244 (bus->clkstate == CLK_AVAIL)) {
2245 int i;
2246
2247 sdio_claim_host(bus->sdiodev->func[1]);
2248 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2249 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2250 (u32) bus->ctrl_frame_len);
2251
2252 if (err < 0) {
2253 /* On failure, abort the command and
2254 terminate the frame */
2255 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2256 err);
2257 bus->sdcnt.tx_sderrs++;
2258
2259 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2260
2261 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2262 SFC_WF_TERM, &err);
2263 bus->sdcnt.f1regdata++;
2264
2265 for (i = 0; i < 3; i++) {
2266 u8 hi, lo;
2267 hi = brcmf_sdio_regrb(bus->sdiodev,
2268 SBSDIO_FUNC1_WFRAMEBCHI,
2269 &err);
2270 lo = brcmf_sdio_regrb(bus->sdiodev,
2271 SBSDIO_FUNC1_WFRAMEBCLO,
2272 &err);
2273 bus->sdcnt.f1regdata += 2;
2274 if ((hi == 0) && (lo == 0))
2275 break;
2276 }
2277
2278 } else {
2279 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2280 }
2281 sdio_release_host(bus->sdiodev->func[1]);
2282 bus->ctrl_frame_stat = false;
2283 brcmf_sdbrcm_wait_event_wakeup(bus);
2284 }
2285 /* Send queued frames (limit 1 if rx may still be pending) */
2286 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2287 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2288 && data_ok(bus)) {
2289 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2290 txlimit;
2291 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2292 txlimit -= framecnt;
2293 }
2294
2295 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2296 brcmf_err("failed backplane access over SDIO, halting operation\n");
2297 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2298 atomic_set(&bus->intstatus, 0);
2299 } else if (atomic_read(&bus->intstatus) ||
2300 atomic_read(&bus->ipend) > 0 ||
2301 (!atomic_read(&bus->fcstate) &&
2302 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2303 data_ok(bus)) || PKT_AVAILABLE()) {
2304 brcmf_sdbrcm_adddpctsk(bus);
2305 }
2306
2307 /* If we're done for now, turn off clock request. */
2308 if ((bus->clkstate != CLK_PENDING)
2309 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2310 bus->activity = false;
2311 sdio_claim_host(bus->sdiodev->func[1]);
2312 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
2313 sdio_release_host(bus->sdiodev->func[1]);
2314 }
2315 }
2316
2317 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2318 {
2319 int ret = -EBADE;
2320 uint datalen, prec;
2321 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2322 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2323 struct brcmf_sdio *bus = sdiodev->bus;
2324 unsigned long flags;
2325
2326 brcmf_dbg(TRACE, "Enter\n");
2327
2328 datalen = pkt->len;
2329
2330 /* Add space for the header */
2331 skb_push(pkt, SDPCM_HDRLEN);
2332 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2333
2334 prec = prio2prec((pkt->priority & PRIOMASK));
2335
2336 /* Check for existing queue, current flow-control,
2337 pending event, or pending clock */
2338 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2339 bus->sdcnt.fcqueued++;
2340
2341 /* Priority based enq */
2342 spin_lock_bh(&bus->txqlock);
2343 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2344 skb_pull(pkt, SDPCM_HDRLEN);
2345 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2346 brcmu_pkt_buf_free_skb(pkt);
2347 brcmf_err("out of bus->txq !!!\n");
2348 ret = -ENOSR;
2349 } else {
2350 ret = 0;
2351 }
2352 spin_unlock_bh(&bus->txqlock);
2353
2354 if (pktq_len(&bus->txq) >= TXHI) {
2355 bus->txoff = true;
2356 brcmf_txflowblock(bus->sdiodev->dev, true);
2357 }
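/* Flow-control hysteresis: the stack is blocked here once the queue
 * reaches TXHI and is only unblocked again in brcmf_sdbrcm_sendfromq()
 * after the queue has drained below TXLOW, so the two thresholds keep
 * the driver from toggling flow control on every packet.
 */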
2358
2359 #ifdef DEBUG
2360 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2361 qcount[prec] = pktq_plen(&bus->txq, prec);
2362 #endif
2363
2364 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2365 if (list_empty(&bus->dpc_tsklst)) {
2366 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2367
2368 brcmf_sdbrcm_adddpctsk(bus);
2369 queue_work(bus->brcmf_wq, &bus->datawork);
2370 } else {
2371 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2372 }
2373
2374 return ret;
2375 }
2376
2377 static int
2378 brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2379 uint size)
2380 {
2381 int bcmerror = 0;
2382 u32 sdaddr;
2383 uint dsize;
2384
2385 /* Determine initial transfer parameters */
2386 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
2387 if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
2388 dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
2389 else
2390 dsize = size;
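/* Worked example, assuming the usual 32 KiB function-1 window
 * (SBSDIO_SB_OFT_ADDR_LIMIT == 0x8000): reading 0x1000 bytes from
 * backplane address 0x00187f00 yields sdaddr 0x7f00, so the first
 * chunk is limited to 0x100 bytes; the window is then moved to
 * 0x00188000 and the remaining 0xf00 bytes are read at sdaddr 0.
 */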
2391
2392 sdio_claim_host(bus->sdiodev->func[1]);
2393
2394 /* Set the backplane window to include the start address */
2395 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
2396 if (bcmerror) {
2397 brcmf_err("window change failed\n");
2398 goto xfer_done;
2399 }
2400
2401 /* Do the transfer(s) */
2402 while (size) {
2403 brcmf_dbg(INFO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
2404 write ? "write" : "read", dsize,
2405 sdaddr, address & SBSDIO_SBWINDOW_MASK);
2406 bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
2407 sdaddr, data, dsize);
2408 if (bcmerror) {
2409 brcmf_err("membytes transfer failed\n");
2410 break;
2411 }
2412
2413 /* Adjust for next transfer (if any) */
2414 size -= dsize;
2415 if (size) {
2416 data += dsize;
2417 address += dsize;
2418 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
2419 address);
2420 if (bcmerror) {
2421 brcmf_err("window change failed\n");
2422 break;
2423 }
2424 sdaddr = 0;
2425 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
2426 }
2427 }
2428
2429 xfer_done:
2430 /* Return the window to backplane enumeration space for core access */
2431 if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
2432 brcmf_err("FAILED to set window back to 0x%x\n",
2433 bus->sdiodev->sbwad);
2434
2435 sdio_release_host(bus->sdiodev->func[1]);
2436
2437 return bcmerror;
2438 }
2439
2440 #ifdef DEBUG
2441 #define CONSOLE_LINE_MAX 192
2442
2443 static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2444 {
2445 struct brcmf_console *c = &bus->console;
2446 u8 line[CONSOLE_LINE_MAX], ch;
2447 u32 n, idx, addr;
2448 int rv;
2449
2450 /* Don't do anything until FWREADY updates console address */
2451 if (bus->console_addr == 0)
2452 return 0;
2453
2454 /* Read console log struct */
2455 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2456 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log_le,
2457 sizeof(c->log_le));
2458 if (rv < 0)
2459 return rv;
2460
2461 /* Allocate console buffer (one time only) */
2462 if (c->buf == NULL) {
2463 c->bufsize = le32_to_cpu(c->log_le.buf_size);
2464 c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2465 if (c->buf == NULL)
2466 return -ENOMEM;
2467 }
2468
2469 idx = le32_to_cpu(c->log_le.idx);
2470
2471 /* Protect against corrupt value */
2472 if (idx > c->bufsize)
2473 return -EBADE;
2474
2475 /* Skip reading the console buffer if the index pointer
2476 has not moved */
2477 if (idx == c->last)
2478 return 0;
2479
2480 /* Read the console buffer */
2481 addr = le32_to_cpu(c->log_le.buf);
2482 rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize);
2483 if (rv < 0)
2484 return rv;
2485
2486 while (c->last != idx) {
2487 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2488 if (c->last == idx) {
2489 /* This would output a partial line.
2490 * Instead, back up
2491 * the buffer pointer and output this
2492 * line next time around.
2493 */
2494 if (c->last >= n)
2495 c->last -= n;
2496 else
2497 c->last = c->bufsize - n;
2498 goto break2;
2499 }
2500 ch = c->buf[c->last];
2501 c->last = (c->last + 1) % c->bufsize;
2502 if (ch == '\n')
2503 break;
2504 line[n] = ch;
2505 }
2506
2507 if (n > 0) {
2508 if (line[n - 1] == '\r')
2509 n--;
2510 line[n] = 0;
2511 pr_debug("CONSOLE: %s\n", line);
2512 }
2513 }
2514 break2:
2515
2516 return 0;
2517 }
2518 #endif /* DEBUG */
2519
2520 static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2521 {
2522 int i;
2523 int ret;
2524
2525 bus->ctrl_frame_stat = false;
2526 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2527 SDIO_FUNC_2, F2SYNC, frame, len);
2528
2529 if (ret < 0) {
2530 /* On failure, abort the command and terminate the frame */
2531 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2532 ret);
2533 bus->sdcnt.tx_sderrs++;
2534
2535 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2536
2537 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2538 SFC_WF_TERM, NULL);
2539 bus->sdcnt.f1regdata++;
2540
2541 for (i = 0; i < 3; i++) {
2542 u8 hi, lo;
2543 hi = brcmf_sdio_regrb(bus->sdiodev,
2544 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2545 lo = brcmf_sdio_regrb(bus->sdiodev,
2546 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2547 bus->sdcnt.f1regdata += 2;
2548 if (hi == 0 && lo == 0)
2549 break;
2550 }
2551 return ret;
2552 }
2553
2554 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2555
2556 return ret;
2557 }
2558
2559 static int
2560 brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2561 {
2562 u8 *frame;
2563 u16 len;
2564 u32 swheader;
2565 uint retries = 0;
2566 u8 doff = 0;
2567 int ret = -1;
2568 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2569 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2570 struct brcmf_sdio *bus = sdiodev->bus;
2571 unsigned long flags;
2572
2573 brcmf_dbg(TRACE, "Enter\n");
2574
2575 /* Back up the pointer to make room for the bus header */
2576 frame = msg - SDPCM_HDRLEN;
2577 len = (msglen += SDPCM_HDRLEN);
2578
2579 /* Add alignment padding (optional for ctl frames) */
2580 doff = ((unsigned long)frame % BRCMF_SDALIGN);
2581 if (doff) {
2582 frame -= doff;
2583 len += doff;
2584 msglen += doff;
2585 memset(frame, 0, doff + SDPCM_HDRLEN);
2586 }
2587 /* precondition: doff < BRCMF_SDALIGN */
2588 doff += SDPCM_HDRLEN;
2589
2590 /* Round send length to next SDIO block */
2591 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2592 u16 pad = bus->blocksize - (len % bus->blocksize);
2593 if ((pad <= bus->roundup) && (pad < bus->blocksize))
2594 len += pad;
2595 } else if (len % BRCMF_SDALIGN) {
2596 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
2597 }
2598
2599 /* Satisfy length-alignment requirements */
2600 if (len & (ALIGNMENT - 1))
2601 len = roundup(len, ALIGNMENT);
2602
2603 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2604
2605 /* Make sure backplane clock is on */
2606 sdio_claim_host(bus->sdiodev->func[1]);
2607 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2608 sdio_release_host(bus->sdiodev->func[1]);
2609
2610 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
2611 *(__le16 *) frame = cpu_to_le16((u16) msglen);
2612 *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
2613
2614 /* Software tag: channel, sequence number, data offset */
2615 swheader =
2616 ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
2617 SDPCM_CHANNEL_MASK)
2618 | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
2619 SDPCM_DOFFSET_MASK);
2620 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
2621 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
2622
2623 if (!data_ok(bus)) {
2624 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2625 bus->tx_max, bus->tx_seq);
2626 bus->ctrl_frame_stat = true;
2627 /* Send from dpc */
2628 bus->ctrl_frame_buf = frame;
2629 bus->ctrl_frame_len = len;
2630
2631 wait_event_interruptible_timeout(bus->ctrl_wait,
2632 !bus->ctrl_frame_stat,
2633 msecs_to_jiffies(2000));
2634
2635 if (!bus->ctrl_frame_stat) {
2636 brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
2637 ret = 0;
2638 } else {
2639 brcmf_dbg(INFO, "ctrl_frame_stat == true\n");
2640 ret = -1;
2641 }
2642 }
2643
2644 if (ret == -1) {
2645 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2646 frame, len, "Tx Frame:\n");
2647 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2648 BRCMF_HDRS_ON(),
2649 frame, min_t(u16, len, 16), "TxHdr:\n");
2650
2651 do {
2652 sdio_claim_host(bus->sdiodev->func[1]);
2653 ret = brcmf_tx_frame(bus, frame, len);
2654 sdio_release_host(bus->sdiodev->func[1]);
2655 } while (ret < 0 && retries++ < TXRETRIES);
2656 }
2657
2658 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2659 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2660 list_empty(&bus->dpc_tsklst)) {
2661 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2662
2663 bus->activity = false;
2664 sdio_claim_host(bus->sdiodev->func[1]);
2665 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2666 sdio_release_host(bus->sdiodev->func[1]);
2667 } else {
2668 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2669 }
2670
2671 if (ret)
2672 bus->sdcnt.tx_ctlerrs++;
2673 else
2674 bus->sdcnt.tx_ctlpkts++;
2675
2676 return ret ? -EIO : 0;
2677 }
2678
2679 #ifdef DEBUG
2680 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2681 {
2682 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2683 }
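/* Example: the NVRAM length token written by brcmf_sdbrcm_write_vars()
 * (low half = word count, high half = its complement) always fails
 * this test -- e.g. for 0xff4300bc both (~addr >> 16) & 0xffff and
 * addr & 0xffff are 0x00bc -- while a hypothetical real sdpcm_shared
 * address such as 0x0007fdc4 passes it.
 */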
2684
2685 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2686 struct sdpcm_shared *sh)
2687 {
2688 u32 addr;
2689 int rv;
2690 u32 shaddr = 0;
2691 struct sdpcm_shared_le sh_le;
2692 __le32 addr_le;
2693
2694 shaddr = bus->ramsize - 4;
2695
2696 /*
2697 * Read last word in socram to determine
2698 * address of sdpcm_shared structure
2699 */
2700 sdio_claim_host(bus->sdiodev->func[1]);
2701 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2702 rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
2703 (u8 *)&addr_le, 4);
2704 sdio_release_host(bus->sdiodev->func[1]);
2705 if (rv < 0)
2706 return rv;
2707
2708 addr = le32_to_cpu(addr_le);
2709
2710 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
2711
2712 /*
2713 * Check if addr is valid.
2714 * NVRAM length at the end of memory should have been overwritten.
2715 */
2716 if (!brcmf_sdio_valid_shared_address(addr)) {
2717 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2718 addr);
2719 return -EINVAL;
2720 }
2721
2722 /* Read hndrte_shared structure */
2723 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
2724 sizeof(struct sdpcm_shared_le));
2725 if (rv < 0)
2726 return rv;
2727
2728 /* Endianness */
2729 sh->flags = le32_to_cpu(sh_le.flags);
2730 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2731 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2732 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2733 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2734 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2735 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2736
2737 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
2738 brcmf_err("sdpcm_shared version mismatch: dhd %d dongle %d\n",
2739 SDPCM_SHARED_VERSION,
2740 sh->flags & SDPCM_SHARED_VERSION_MASK);
2741 return -EPROTO;
2742 }
2743
2744 return 0;
2745 }
2746
2747 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2748 struct sdpcm_shared *sh, char __user *data,
2749 size_t count)
2750 {
2751 u32 addr, console_ptr, console_size, console_index;
2752 char *conbuf = NULL;
2753 __le32 sh_val;
2754 int rv;
2755 loff_t pos = 0;
2756 int nbytes = 0;
2757
2758 /* obtain console information from device memory */
2759 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2760 rv = brcmf_sdbrcm_membytes(bus, false, addr,
2761 (u8 *)&sh_val, sizeof(u32));
2762 if (rv < 0)
2763 return rv;
2764 console_ptr = le32_to_cpu(sh_val);
2765
2766 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2767 rv = brcmf_sdbrcm_membytes(bus, false, addr,
2768 (u8 *)&sh_val, sizeof(u32));
2769 if (rv < 0)
2770 return rv;
2771 console_size = le32_to_cpu(sh_val);
2772
2773 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2774 rv = brcmf_sdbrcm_membytes(bus, false, addr,
2775 (u8 *)&sh_val, sizeof(u32));
2776 if (rv < 0)
2777 return rv;
2778 console_index = le32_to_cpu(sh_val);
2779
2780 /* allocate buffer for console data */
2781 if (console_size <= CONSOLE_BUFFER_MAX)
2782 conbuf = vzalloc(console_size+1);
2783
2784 if (!conbuf)
2785 return -ENOMEM;
2786
2787 /* obtain the console data from device */
2788 conbuf[console_size] = '\0';
2789 rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf,
2790 console_size);
2791 if (rv < 0)
2792 goto done;
2793
2794 rv = simple_read_from_buffer(data, count, &pos,
2795 conbuf + console_index,
2796 console_size - console_index);
2797 if (rv < 0)
2798 goto done;
2799
2800 nbytes = rv;
2801 if (console_index > 0) {
2802 pos = 0;
2803 rv = simple_read_from_buffer(data+nbytes, count, &pos,
2804 conbuf, console_index - 1);
2805 if (rv < 0)
2806 goto done;
2807 rv += nbytes;
2808 }
2809 done:
2810 vfree(conbuf);
2811 return rv;
2812 }
2813
2814 static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2815 char __user *data, size_t count)
2816 {
2817 int error, res;
2818 char buf[350];
2819 struct brcmf_trap_info tr;
2820 int nbytes;
2821 loff_t pos = 0;
2822
2823 if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
2824 return 0;
2825
2826 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
2827 sizeof(struct brcmf_trap_info));
2828 if (error < 0)
2829 return error;
2830
2831 nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
2832 if (nbytes < 0)
2833 return nbytes;
2834
2835 res = scnprintf(buf, sizeof(buf),
2836 "dongle trap info: type 0x%x @ epc 0x%08x\n"
2837 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
2838 " lr 0x%08x pc 0x%08x offset 0x%x\n"
2839 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
2840 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
2841 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
2842 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
2843 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
2844 le32_to_cpu(tr.pc), sh->trap_addr,
2845 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
2846 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
2847 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
2848 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
2849
2850 error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res);
2851 if (error < 0)
2852 return error;
2853
2854 nbytes += error;
2855 return nbytes;
2856 }
2857
2858 static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2859 struct sdpcm_shared *sh, char __user *data,
2860 size_t count)
2861 {
2862 int error = 0;
2863 char buf[200];
2864 char file[80] = "?";
2865 char expr[80] = "<???>";
2866 int res;
2867 loff_t pos = 0;
2868
2869 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
2870 brcmf_dbg(INFO, "firmware not built with -assert\n");
2871 return 0;
2872 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
2873 brcmf_dbg(INFO, "no assert in dongle\n");
2874 return 0;
2875 }
2876
2877 sdio_claim_host(bus->sdiodev->func[1]);
2878 if (sh->assert_file_addr != 0) {
2879 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
2880 (u8 *)file, 80);
2881 if (error < 0)
2882 return error;
2883 }
2884 if (sh->assert_exp_addr != 0) {
2885 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr,
2886 (u8 *)expr, 80);
2887 if (error < 0)
2888 return error;
2889 }
2890 sdio_release_host(bus->sdiodev->func[1]);
2891
2892 res = scnprintf(buf, sizeof(buf),
2893 "dongle assert: %s:%d: assert(%s)\n",
2894 file, sh->assert_line, expr);
2895 return simple_read_from_buffer(data, count, &pos, buf, res);
2896 }
2897
2898 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2899 {
2900 int error;
2901 struct sdpcm_shared sh;
2902
2903 error = brcmf_sdio_readshared(bus, &sh);
2904
2905 if (error < 0)
2906 return error;
2907
2908 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
2909 brcmf_dbg(INFO, "firmware not built with -assert\n");
2910 else if (sh.flags & SDPCM_SHARED_ASSERT)
2911 brcmf_err("assertion in dongle\n");
2912
2913 if (sh.flags & SDPCM_SHARED_TRAP)
2914 brcmf_err("firmware trap in dongle\n");
2915
2916 return 0;
2917 }
2918
2919 static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2920 size_t count, loff_t *ppos)
2921 {
2922 int error = 0;
2923 struct sdpcm_shared sh;
2924 int nbytes = 0;
2925 loff_t pos = *ppos;
2926
2927 if (pos != 0)
2928 return 0;
2929
2930 error = brcmf_sdio_readshared(bus, &sh);
2931 if (error < 0)
2932 goto done;
2933
2934 error = brcmf_sdio_assert_info(bus, &sh, data, count);
2935 if (error < 0)
2936 goto done;
2937
2938 nbytes = error;
2939 error = brcmf_sdio_trap_info(bus, &sh, data, count);
2940 if (error < 0)
2941 goto done;
2942
2943 error += nbytes;
2944 *ppos += error;
2945 done:
2946 return error;
2947 }
2948
2949 static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
2950 size_t count, loff_t *ppos)
2951 {
2952 struct brcmf_sdio *bus = f->private_data;
2953 int res;
2954
2955 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
2956 if (res > 0)
2957 *ppos += res;
2958 return (ssize_t)res;
2959 }
2960
2961 static const struct file_operations brcmf_sdio_forensic_ops = {
2962 .owner = THIS_MODULE,
2963 .open = simple_open,
2964 .read = brcmf_sdio_forensic_read
2965 };
2966
2967 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2968 {
2969 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
2970 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
2971
2972 if (IS_ERR_OR_NULL(dentry))
2973 return;
2974
2975 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
2976 &brcmf_sdio_forensic_ops);
2977 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
2978 }
2979 #else
2980 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2981 {
2982 return 0;
2983 }
2984
2985 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2986 {
2987 }
2988 #endif /* DEBUG */
2989
2990 static int
2991 brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2992 {
2993 int timeleft;
2994 uint rxlen = 0;
2995 bool pending;
2996 u8 *buf;
2997 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2998 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2999 struct brcmf_sdio *bus = sdiodev->bus;
3000
3001 brcmf_dbg(TRACE, "Enter\n");
3002
3003 /* Wait until control frame is available */
3004 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3005
3006 spin_lock_bh(&bus->rxctl_lock);
3007 rxlen = bus->rxlen;
3008 memcpy(msg, bus->rxctl, min(msglen, rxlen));
3009 bus->rxctl = NULL;
3010 buf = bus->rxctl_orig;
3011 bus->rxctl_orig = NULL;
3012 bus->rxlen = 0;
3013 spin_unlock_bh(&bus->rxctl_lock);
3014 vfree(buf);
3015
3016 if (rxlen) {
3017 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3018 rxlen, msglen);
3019 } else if (timeleft == 0) {
3020 brcmf_err("resumed on timeout\n");
3021 brcmf_sdbrcm_checkdied(bus);
3022 } else if (pending) {
3023 brcmf_dbg(CTL, "cancelled\n");
3024 return -ERESTARTSYS;
3025 } else {
3026 brcmf_dbg(CTL, "resumed for unknown reason?\n");
3027 brcmf_sdbrcm_checkdied(bus);
3028 }
3029
3030 if (rxlen)
3031 bus->sdcnt.rx_ctlpkts++;
3032 else
3033 bus->sdcnt.rx_ctlerrs++;
3034
3035 return rxlen ? (int)rxlen : -ETIMEDOUT;
3036 }
3037
3038 static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3039 {
3040 int bcmerror = 0;
3041 u32 varaddr;
3042 u32 varsizew;
3043 __le32 varsizew_le;
3044 #ifdef DEBUG
3045 char *nvram_ularray;
3046 #endif /* DEBUG */
3047
3048 /* Even if there are no vars to be written, we still
3049 need to set the ramsize. */
3050 varaddr = (bus->ramsize - 4) - bus->varsz;
3051
3052 if (bus->vars) {
3053 /* Write the vars list */
3054 bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
3055 bus->vars, bus->varsz);
3056 #ifdef DEBUG
3057 /* Verify NVRAM bytes */
3058 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
3059 bus->varsz);
3060 nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
3061 if (!nvram_ularray)
3062 return -ENOMEM;
3063
3064 /* Upload image to verify downloaded contents. */
3065 memset(nvram_ularray, 0xaa, bus->varsz);
3066
3067 /* Read the vars list to temp buffer for comparison */
3068 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
3069 nvram_ularray, bus->varsz);
3070 if (bcmerror) {
3071 brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
3072 bcmerror, bus->varsz, varaddr);
3073 }
3074 /* Compare the original NVRAM with the copy read back from RAM */
3075 if (memcmp(bus->vars, nvram_ularray, bus->varsz))
3076 brcmf_err("Downloaded NVRAM image is corrupted\n");
3077 else
3078 brcmf_err("Download/Upload/Compare of NVRAM ok\n");
3079
3080 kfree(nvram_ularray);
3081 #endif /* DEBUG */
3082 }
3083
3084 /* adjust to the user specified RAM */
3085 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
3086 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
3087 varaddr, bus->varsz);
3088
3089 /*
3090 * Determine the length token:
3091 * Varsize, converted to words, in lower 16-bits, checksum
3092 * in upper 16-bits.
3093 */
3094 if (bcmerror) {
3095 varsizew = 0;
3096 varsizew_le = cpu_to_le32(0);
3097 } else {
3098 varsizew = bus->varsz / 4;
3099 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3100 varsizew_le = cpu_to_le32(varsizew);
3101 }
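/* Example: for a 752-byte (0x2f0) NVRAM image varsizew is 188 words
 * (0xbc), so the token written below is (~0xbc << 16) | 0xbc =
 * 0xff4300bc; brcmf_sdio_valid_shared_address() relies on firmware
 * overwriting exactly this complement pattern once it has booted.
 */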
3102
3103 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
3104 bus->varsz, varsizew);
3105
3106 /* Write the length token to the last word */
3107 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
3108 (u8 *)&varsizew_le, 4);
3109
3110 return bcmerror;
3111 }
3112
3113 static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3114 {
3115 int bcmerror = 0;
3116 struct chip_info *ci = bus->ci;
3117
3118 /* To enter download state, disable ARM and reset SOCRAM.
3119 * To exit download state, simply reset ARM (default is RAM boot).
3120 */
3121 if (enter) {
3122 bus->alp_only = true;
3123
3124 ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
3125
3126 ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
3127
3128 /* Clear the top bit of memory */
3129 if (bus->ramsize) {
3130 u32 zeros = 0;
3131 brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
3132 (u8 *)&zeros, 4);
3133 }
3134 } else {
3135 if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
3136 brcmf_err("SOCRAM core is down after reset?\n");
3137 bcmerror = -EBADE;
3138 goto fail;
3139 }
3140
3141 bcmerror = brcmf_sdbrcm_write_vars(bus);
3142 if (bcmerror) {
3143 brcmf_err("no vars written to RAM\n");
3144 bcmerror = 0;
3145 }
3146
3147 w_sdreg32(bus, 0xFFFFFFFF,
3148 offsetof(struct sdpcmd_regs, intstatus));
3149
3150 ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
3151
3152 /* Allow HT Clock now that the ARM is running. */
3153 bus->alp_only = false;
3154
3155 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
3156 }
3157 fail:
3158 return bcmerror;
3159 }
3160
3161 static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
3162 {
3163 if (bus->firmware->size < bus->fw_ptr + len)
3164 len = bus->firmware->size - bus->fw_ptr;
3165
3166 memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
3167 bus->fw_ptr += len;
3168 return len;
3169 }
3170
3171 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3172 {
3173 int offset = 0;
3174 uint len;
3175 u8 *memblock = NULL, *memptr;
3176 int ret;
3177
3178 brcmf_dbg(INFO, "Enter\n");
3179
3180 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
3181 &bus->sdiodev->func[2]->dev);
3182 if (ret) {
3183 brcmf_err("Fail to request firmware %d\n", ret);
3184 return ret;
3185 }
3186 bus->fw_ptr = 0;
3187
3188 memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
3189 if (memblock == NULL) {
3190 ret = -ENOMEM;
3191 goto err;
3192 }
3193 if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
3194 memptr += (BRCMF_SDALIGN -
3195 ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
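/* Example of the alignment fix-up, assuming BRCMF_SDALIGN is 32 and
 * kmalloc() returned a hypothetical 0x...1014: 0x1014 % 32 == 20, so
 * memptr is advanced by 12 to 0x...1020 and every MEMBLOCK-sized
 * chunk below is written from an aligned buffer.
 */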
3196
3197 /* Download image */
3198 while ((len =
3199 brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
3200 ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
3201 if (ret) {
3202 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3203 ret, MEMBLOCK, offset);
3204 goto err;
3205 }
3206
3207 offset += MEMBLOCK;
3208 }
3209
3210 err:
3211 kfree(memblock);
3212
3213 release_firmware(bus->firmware);
3214 bus->fw_ptr = 0;
3215
3216 return ret;
3217 }
3218
3219 /*
3220 * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file
3221 * and ending in a NUL.
3222 * Removes carriage returns, empty lines, comment lines, and converts
3223 * newlines to NULs.
3224 * Shortens buffer as needed and pads with NULs. End of buffer is marked
3225 * by two NULs.
3226 */
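/* Example: an input of
 *   "# comment\r\nboardrev=0x1250\r\n\nmacaddr=00:90:4c:c5:12:38\n"
 * is reduced to "boardrev=0x1250\0macaddr=00:90:4c:c5:12:38\0" with
 * the comment, carriage returns and blank line stripped; the buffer
 * is then NUL-padded and rounded up to a multiple of 4 bytes before
 * being handed to brcmf_sdbrcm_write_vars().
 */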
3227
3228 static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3229 {
3230 char *varbuf;
3231 char *dp;
3232 bool findNewline;
3233 int column;
3234 int ret = 0;
3235 uint buf_len, n, len;
3236
3237 len = bus->firmware->size;
3238 varbuf = vmalloc(len);
3239 if (!varbuf)
3240 return -ENOMEM;
3241
3242 memcpy(varbuf, bus->firmware->data, len);
3243 dp = varbuf;
3244
3245 findNewline = false;
3246 column = 0;
3247
3248 for (n = 0; n < len; n++) {
3249 if (varbuf[n] == 0)
3250 break;
3251 if (varbuf[n] == '\r')
3252 continue;
3253 if (findNewline && varbuf[n] != '\n')
3254 continue;
3255 findNewline = false;
3256 if (varbuf[n] == '#') {
3257 findNewline = true;
3258 continue;
3259 }
3260 if (varbuf[n] == '\n') {
3261 if (column == 0)
3262 continue;
3263 *dp++ = 0;
3264 column = 0;
3265 continue;
3266 }
3267 *dp++ = varbuf[n];
3268 column++;
3269 }
3270 buf_len = dp - varbuf;
3271 while (dp < varbuf + n)
3272 *dp++ = 0;
3273
3274 kfree(bus->vars);
3275 /* roundup needed for download to device */
3276 bus->varsz = roundup(buf_len + 1, 4);
3277 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3278 if (bus->vars == NULL) {
3279 bus->varsz = 0;
3280 ret = -ENOMEM;
3281 goto err;
3282 }
3283
3284 /* copy the processed variables and add null termination */
3285 memcpy(bus->vars, varbuf, buf_len);
3286 bus->vars[buf_len] = 0;
3287 err:
3288 vfree(varbuf);
3289 return ret;
3290 }
3291
3292 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3293 {
3294 int ret;
3295
3296 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3297 &bus->sdiodev->func[2]->dev);
3298 if (ret) {
3299 brcmf_err("Fail to request nvram %d\n", ret);
3300 return ret;
3301 }
3302
3303 ret = brcmf_process_nvram_vars(bus);
3304
3305 release_firmware(bus->firmware);
3306
3307 return ret;
3308 }
3309
3310 static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3311 {
3312 int bcmerror = -1;
3313
3314 /* Keep arm in reset */
3315 if (brcmf_sdbrcm_download_state(bus, true)) {
3316 brcmf_err("error placing ARM core in reset\n");
3317 goto err;
3318 }
3319
3320 if (brcmf_sdbrcm_download_code_file(bus)) {
3321 brcmf_err("dongle image file download failed\n");
3322 goto err;
3323 }
3324
3325 if (brcmf_sdbrcm_download_nvram(bus)) {
3326 brcmf_err("dongle nvram file download failed\n");
3327 goto err;
3328 }
3329
3330 /* Take arm out of reset */
3331 if (brcmf_sdbrcm_download_state(bus, false)) {
3332 brcmf_err("error getting out of ARM core reset\n");
3333 goto err;
3334 }
3335
3336 bcmerror = 0;
3337
3338 err:
3339 return bcmerror;
3340 }
3341
3342 static bool
3343 brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3344 {
3345 bool ret;
3346
3347 sdio_claim_host(bus->sdiodev->func[1]);
3348
3349 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3350
3351 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3352
3353 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3354
3355 sdio_release_host(bus->sdiodev->func[1]);
3356
3357 return ret;
3358 }
3359
3360 static int brcmf_sdbrcm_bus_init(struct device *dev)
3361 {
3362 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3363 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3364 struct brcmf_sdio *bus = sdiodev->bus;
3365 unsigned long timeout;
3366 u8 ready, enable;
3367 int err, ret = 0;
3368 u8 saveclk;
3369
3370 brcmf_dbg(TRACE, "Enter\n");
3371
3372 /* try to download image and nvram to the dongle */
3373 if (bus_if->state == BRCMF_BUS_DOWN) {
3374 if (!(brcmf_sdbrcm_download_firmware(bus)))
3375 return -1;
3376 }
3377
3378 if (!bus->sdiodev->bus_if->drvr)
3379 return 0;
3380
3381 /* Start the watchdog timer */
3382 bus->sdcnt.tickcnt = 0;
3383 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3384
3385 sdio_claim_host(bus->sdiodev->func[1]);
3386
3387 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3388 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3389 if (bus->clkstate != CLK_AVAIL)
3390 goto exit;
3391
3392 /* Force clocks on backplane to be sure F2 interrupt propagates */
3393 saveclk = brcmf_sdio_regrb(bus->sdiodev,
3394 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3395 if (!err) {
3396 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3397 (saveclk | SBSDIO_FORCE_HT), &err);
3398 }
3399 if (err) {
3400 brcmf_err("Failed to force clock for F2: err %d\n", err);
3401 goto exit;
3402 }
3403
3404 /* Enable function 2 (frame transfers) */
3405 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3406 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3407 enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
3408
3409 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3410
3411 timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
3412 ready = 0;
3413 while (enable != ready) {
3414 ready = brcmf_sdio_regrb(bus->sdiodev,
3415 SDIO_CCCR_IORx, NULL);
3416 if (time_after(jiffies, timeout))
3417 break;
3418 else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
3419 /* prevent busy waiting if it takes too long */
3420 msleep_interruptible(20);
3421 }
3422
3423 brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
3424
3425 /* If F2 successfully enabled, set core and enable interrupts */
3426 if (ready == enable) {
3427 /* Set up the interrupt mask and enable interrupts */
3428 bus->hostintmask = HOSTINTMASK;
3429 w_sdreg32(bus, bus->hostintmask,
3430 offsetof(struct sdpcmd_regs, hostintmask));
3431
3432 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3433 } else {
3434 /* Disable F2 again */
3435 enable = SDIO_FUNC_ENABLE_1;
3436 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3437 ret = -ENODEV;
3438 }
3439
3440 /* Restore previous clock setting */
3441 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
3442
3443 if (ret == 0) {
3444 ret = brcmf_sdio_intr_register(bus->sdiodev);
3445 if (ret != 0)
3446 brcmf_err("intr register failed:%d\n", ret);
3447 }
3448
3449 /* If we didn't come up, turn off backplane clock */
3450 if (bus_if->state != BRCMF_BUS_DATA)
3451 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3452
3453 exit:
3454 sdio_release_host(bus->sdiodev->func[1]);
3455
3456 return ret;
3457 }
3458
3459 void brcmf_sdbrcm_isr(void *arg)
3460 {
3461 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3462
3463 brcmf_dbg(TRACE, "Enter\n");
3464
3465 if (!bus) {
3466 brcmf_err("bus is null pointer, exiting\n");
3467 return;
3468 }
3469
3470 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3471 brcmf_err("bus is down. we have nothing to do\n");
3472 return;
3473 }
3474 /* Count the interrupt call */
3475 bus->sdcnt.intrcount++;
3476 if (in_interrupt())
3477 atomic_set(&bus->ipend, 1);
3478 else
3479 if (brcmf_sdio_intr_rstatus(bus)) {
3480 brcmf_err("failed backplane access\n");
3481 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3482 }
3483
3484 /* Disable additional interrupts (is this needed now?) */
3485 if (!bus->intr)
3486 brcmf_err("isr w/o interrupt configured!\n");
3487
3488 brcmf_sdbrcm_adddpctsk(bus);
3489 queue_work(bus->brcmf_wq, &bus->datawork);
3490 }
3491
3492 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3493 {
3494 #ifdef DEBUG
3495 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3496 #endif /* DEBUG */
3497 unsigned long flags;
3498
3499 brcmf_dbg(TIMER, "Enter\n");
3500
3501 /* Poll period: check device if appropriate. */
3502 if (bus->poll && (++bus->polltick >= bus->pollrate)) {
3503 u32 intstatus = 0;
3504
3505 /* Reset poll tick */
3506 bus->polltick = 0;
3507
3508 /* Check device if no interrupts */
3509 if (!bus->intr ||
3510 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3511
3512 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3513 if (list_empty(&bus->dpc_tsklst)) {
3514 u8 devpend;
3515 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3516 flags);
3517 sdio_claim_host(bus->sdiodev->func[1]);
3518 devpend = brcmf_sdio_regrb(bus->sdiodev,
3519 SDIO_CCCR_INTx,
3520 NULL);
3521 sdio_release_host(bus->sdiodev->func[1]);
3522 intstatus =
3523 devpend & (INTR_STATUS_FUNC1 |
3524 INTR_STATUS_FUNC2);
3525 } else {
3526 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3527 flags);
3528 }
3529
3530 /* If there is something, make like the ISR and
3531 schedule the DPC */
3532 if (intstatus) {
3533 bus->sdcnt.pollcnt++;
3534 atomic_set(&bus->ipend, 1);
3535
3536 brcmf_sdbrcm_adddpctsk(bus);
3537 queue_work(bus->brcmf_wq, &bus->datawork);
3538 }
3539 }
3540
3541 /* Update interrupt tracking */
3542 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3543 }
3544 #ifdef DEBUG
3545 /* Poll for console output periodically */
3546 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3547 bus->console_interval != 0) {
3548 bus->console.count += BRCMF_WD_POLL_MS;
3549 if (bus->console.count >= bus->console_interval) {
3550 bus->console.count -= bus->console_interval;
3551 sdio_claim_host(bus->sdiodev->func[1]);
3552 /* Make sure backplane clock is on */
3553 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3554 if (brcmf_sdbrcm_readconsole(bus) < 0)
3555 /* stop on error */
3556 bus->console_interval = 0;
3557 sdio_release_host(bus->sdiodev->func[1]);
3558 }
3559 }
3560 #endif /* DEBUG */
3561
3562 /* On idle timeout clear activity flag and/or turn off clock */
3563 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3564 if (++bus->idlecount >= bus->idletime) {
3565 bus->idlecount = 0;
3566 if (bus->activity) {
3567 bus->activity = false;
3568 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3569 } else {
3570 sdio_claim_host(bus->sdiodev->func[1]);
3571 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3572 sdio_release_host(bus->sdiodev->func[1]);
3573 }
3574 }
3575 }
3576
3577 return (atomic_read(&bus->ipend) > 0);
3578 }
3579
3580 static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3581 {
3582 if (chipid == BCM43241_CHIP_ID)
3583 return true;
3584 if (chipid == BCM4329_CHIP_ID)
3585 return true;
3586 if (chipid == BCM4330_CHIP_ID)
3587 return true;
3588 if (chipid == BCM4334_CHIP_ID)
3589 return true;
3590 return false;
3591 }
3592
3593 static void brcmf_sdio_dataworker(struct work_struct *work)
3594 {
3595 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3596 datawork);
3597 struct list_head *cur_hd, *tmp_hd;
3598 unsigned long flags;
3599
3600 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3601 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
3602 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3603
3604 brcmf_sdbrcm_dpc(bus);
3605
3606 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3607 list_del(cur_hd);
3608 kfree(cur_hd);
3609 }
3610 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3611 }
3612
3613 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3614 {
3615 brcmf_dbg(TRACE, "Enter\n");
3616
3617 kfree(bus->rxbuf);
3618 bus->rxctl = bus->rxbuf = NULL;
3619 bus->rxlen = 0;
3620
3621 kfree(bus->databuf);
3622 bus->databuf = NULL;
3623 }
3624
3625 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3626 {
3627 brcmf_dbg(TRACE, "Enter\n");
3628
3629 if (bus->sdiodev->bus_if->maxctl) {
3630 bus->rxblen =
3631 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
3632 ALIGNMENT) + BRCMF_SDALIGN;
3633 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3634 if (!(bus->rxbuf))
3635 goto fail;
3636 }
3637
3638 /* Allocate buffer to receive glomed packet */
3639 bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
3640 if (!(bus->databuf)) {
3641 /* release rxbuf which was already allocated above */
3642 if (!bus->rxblen)
3643 kfree(bus->rxbuf);
3644 goto fail;
3645 }
3646
3647 /* Align the buffer */
3648 if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
3649 bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
3650 ((unsigned long)bus->databuf % BRCMF_SDALIGN));
3651 else
3652 bus->dataptr = bus->databuf;
3653
3654 return true;
3655
3656 fail:
3657 return false;
3658 }
3659
3660 static bool
3661 brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3662 {
3663 u8 clkctl = 0;
3664 int err = 0;
3665 int reg_addr;
3666 u32 reg_val;
3667 u8 idx;
3668
3669 bus->alp_only = true;
3670
3671 sdio_claim_host(bus->sdiodev->func[1]);
3672
3673 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3674 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3675
3676 /*
3677 * Force PLL off until brcmf_sdio_chip_attach()
3678 * programs PLL control regs
3679 */
3680
3681 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3682 BRCMF_INIT_CLKCTL1, &err);
3683 if (!err)
3684 clkctl = brcmf_sdio_regrb(bus->sdiodev,
3685 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3686
3687 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3688 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3689 err, BRCMF_INIT_CLKCTL1, clkctl);
3690 goto fail;
3691 }
3692
3693 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
3694 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3695 goto fail;
3696 }
3697
3698 if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
3699 brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
3700 goto fail;
3701 }
3702
3703 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
3704 SDIO_DRIVE_STRENGTH);
3705
3706 /* Get info on the SOCRAM cores... */
3707 bus->ramsize = bus->ci->ramsize;
3708 if (!(bus->ramsize)) {
3709 brcmf_err("failed to find SOCRAM memory!\n");
3710 goto fail;
3711 }
3712
3713 /* Set core control so an SDIO reset does a backplane reset */
3714 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
3715 reg_addr = bus->ci->c_inf[idx].base +
3716 offsetof(struct sdpcmd_regs, corecontrol);
3717 reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
3718 brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
3719
3720 sdio_release_host(bus->sdiodev->func[1]);
3721
3722 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3723
3724 /* Locate an appropriately-aligned portion of hdrbuf */
3725 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3726 BRCMF_SDALIGN);
3727
3728 /* Set the poll and/or interrupt flags */
3729 bus->intr = true;
3730 bus->poll = false;
3731 if (bus->poll)
3732 bus->pollrate = 1;
3733
3734 return true;
3735
3736 fail:
3737 sdio_release_host(bus->sdiodev->func[1]);
3738 return false;
3739 }
3740
3741 static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3742 {
3743 brcmf_dbg(TRACE, "Enter\n");
3744
3745 sdio_claim_host(bus->sdiodev->func[1]);
3746
3747 /* Disable F2 to clear any intermediate frame state on the dongle */
3748 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
3749 SDIO_FUNC_ENABLE_1, NULL);
3750
3751 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3752 bus->rxflow = false;
3753
3754 /* Done with backplane-dependent accesses, can drop clock... */
3755 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
3756
3757 sdio_release_host(bus->sdiodev->func[1]);
3758
3759 /* ...and initialize clock/power states */
3760 bus->clkstate = CLK_SDONLY;
3761 bus->idletime = BRCMF_IDLE_INTERVAL;
3762 bus->idleclock = BRCMF_IDLE_ACTIVE;
3763
3764 /* Query the F2 block size, set roundup accordingly */
3765 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
3766 bus->roundup = min(max_roundup, bus->blocksize);
3767
3768 /* bus module does not support packet chaining */
3769 bus->use_rxchain = false;
3770 bus->sd_rxchain = false;
3771
3772 return true;
3773 }
3774
3775 static int
3776 brcmf_sdbrcm_watchdog_thread(void *data)
3777 {
3778 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3779
3780 allow_signal(SIGTERM);
3781 /* Run until signal received */
3782 while (1) {
3783 if (kthread_should_stop())
3784 break;
3785 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3786 brcmf_sdbrcm_bus_watchdog(bus);
3787 /* Count the tick for reference */
3788 bus->sdcnt.tickcnt++;
3789 } else
3790 break;
3791 }
3792 return 0;
3793 }
3794
3795 static void
3796 brcmf_sdbrcm_watchdog(unsigned long data)
3797 {
3798 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3799
3800 if (bus->watchdog_tsk) {
3801 complete(&bus->watchdog_wait);
3802 /* Reschedule the watchdog */
3803 if (bus->wd_timer_valid)
3804 mod_timer(&bus->timer,
3805 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
3806 }
3807 }
3808
3809 static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3810 {
3811 brcmf_dbg(TRACE, "Enter\n");
3812
3813 if (bus->ci) {
3814 sdio_claim_host(bus->sdiodev->func[1]);
3815 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3816 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3817 sdio_release_host(bus->sdiodev->func[1]);
3818 brcmf_sdio_chip_detach(&bus->ci);
3819 /* kfree(NULL) is a no-op, so free bus->vars unconditionally */
3820 kfree(bus->vars);
3821 bus->vars = NULL;
3822 }
3823
3824 brcmf_dbg(TRACE, "Disconnected\n");
3825 }
3826
3827 /* Detach and free everything */
3828 static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3829 {
3830 brcmf_dbg(TRACE, "Enter\n");
3831
3832 if (bus) {
3833 /* De-register interrupt handler */
3834 brcmf_sdio_intr_unregister(bus->sdiodev);
3835
3836 cancel_work_sync(&bus->datawork);
3837 if (bus->brcmf_wq)
3838 destroy_workqueue(bus->brcmf_wq);
3839
3840 if (bus->sdiodev->bus_if->drvr) {
3841 brcmf_detach(bus->sdiodev->dev);
3842 brcmf_sdbrcm_release_dongle(bus);
3843 }
3844
3845 brcmf_sdbrcm_release_malloc(bus);
3846
3847 kfree(bus);
3848 }
3849
3850 brcmf_dbg(TRACE, "Disconnected\n");
3851 }
3852
3853 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3854 .stop = brcmf_sdbrcm_bus_stop,
3855 .init = brcmf_sdbrcm_bus_init,
3856 .txdata = brcmf_sdbrcm_bus_txdata,
3857 .txctl = brcmf_sdbrcm_bus_txctl,
3858 .rxctl = brcmf_sdbrcm_bus_rxctl,
3859 };
3860
3861 void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3862 {
3863 int ret;
3864 struct brcmf_sdio *bus;
3865 struct brcmf_bus_dcmd *dlst;
3866 u32 dngl_txglom;
3867 u32 dngl_txglomalign;
3868 u8 idx;
3869
3870 brcmf_dbg(TRACE, "Enter\n");
3871
3872 /* We make an assumption about address window mappings:
3873 * regsva == SI_ENUM_BASE */
3874
3875 /* Allocate private bus interface state */
3876 bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
3877 if (!bus)
3878 goto fail;
3879
3880 bus->sdiodev = sdiodev;
3881 sdiodev->bus = bus;
3882 skb_queue_head_init(&bus->glom);
3883 bus->txbound = BRCMF_TXBOUND;
3884 bus->rxbound = BRCMF_RXBOUND;
3885 bus->txminmax = BRCMF_TXMINMAX;
3886 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
3887
3888 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3889 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3890 if (bus->brcmf_wq == NULL) {
3891 brcmf_err("insufficient memory to create txworkqueue\n");
3892 goto fail;
3893 }
3894
3895 /* attempt to attach to the dongle */
3896 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
3897 brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
3898 goto fail;
3899 }
3900
3901 spin_lock_init(&bus->rxctl_lock);
3902 spin_lock_init(&bus->txqlock);
3903 init_waitqueue_head(&bus->ctrl_wait);
3904 init_waitqueue_head(&bus->dcmd_resp_wait);
3905
3906 /* Set up the watchdog timer */
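/* (The timer callback, brcmf_sdbrcm_watchdog() above, completes
 * watchdog_wait to wake the watchdog thread started below.)
 */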
3907 init_timer(&bus->timer);
3908 bus->timer.data = (unsigned long)bus;
3909 bus->timer.function = brcmf_sdbrcm_watchdog;
3910
3911 /* Initialize watchdog thread */
3912 init_completion(&bus->watchdog_wait);
3913 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
3914 bus, "brcmf_watchdog");
3915 if (IS_ERR(bus->watchdog_tsk)) {
3916 pr_warn("brcmf_watchdog thread failed to start\n");
3917 bus->watchdog_tsk = NULL;
3918 }
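/* (With watchdog_tsk left NULL, brcmf_sdbrcm_watchdog() above skips the
 * completion, so the periodic watchdog service stays inactive.)
 */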
3919 /* Initialize DPC thread */
3920 INIT_LIST_HEAD(&bus->dpc_tsklst);
3921 spin_lock_init(&bus->dpc_tl_lock);
3922
3923 /* Assign bus interface callbacks */
3924 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
3925 bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
3926 bus->sdiodev->bus_if->chip = bus->ci->chip;
3927 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
3928
3929 /* Attach to the brcmf/OS/network interface */
3930 ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
3931 if (ret != 0) {
3932 brcmf_err("brcmf_attach failed\n");
3933 goto fail;
3934 }
3935
3936 /* Allocate buffers */
3937 if (!(brcmf_sdbrcm_probe_malloc(bus))) {
3938 brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
3939 goto fail;
3940 }
3941
3942 if (!(brcmf_sdbrcm_probe_init(bus))) {
3943 brcmf_err("brcmf_sdbrcm_probe_init failed\n");
3944 goto fail;
3945 }
3946
3947 brcmf_sdio_debugfs_create(bus);
3948 brcmf_dbg(INFO, "completed!!\n");
3949
3950 /* sdio bus core specific dcmd */
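/* (Note: the dcmd params below point at the on-stack u32s declared at
 * the top of this function; the list entry is assumed to be consumed by
 * brcmf_bus_start() further down, while this stack frame is still live.)
 */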
3951 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
3952 dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
3953 if (dlst) {
3954 if (bus->ci->c_inf[idx].rev < 12) {
3955 /* for sdio core rev < 12, disable tx glomming */
3956 dngl_txglom = 0;
3957 dlst->name = "bus:txglom";
3958 dlst->param = (char *)&dngl_txglom;
3959 dlst->param_len = sizeof(u32);
3960 } else {
3961 /* otherwise, set txglomalign */
3962 dngl_txglomalign = bus->sdiodev->bus_if->align;
3963 dlst->name = "bus:txglomalign";
3964 dlst->param = (char *)&dngl_txglomalign;
3965 dlst->param_len = sizeof(u32);
3966 }
3967 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
3968 }
3969
3970 /* If a firmware path is present, try to download it and bring up the bus */
3971 ret = brcmf_bus_start(bus->sdiodev->dev);
3972 if (ret != 0) {
3973 brcmf_err("dongle is not responding\n");
3974 goto fail;
3975 }
3976
3977 return bus;
3978
3979 fail:
3980 brcmf_sdbrcm_release(bus);
3981 return NULL;
3982 }
3983
3984 void brcmf_sdbrcm_disconnect(void *ptr)
3985 {
3986 struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
3987
3988 brcmf_dbg(TRACE, "Enter\n");
3989
3990 if (bus)
3991 brcmf_sdbrcm_release(bus);
3992
3993 brcmf_dbg(TRACE, "Disconnected\n");
3994 }
3995
3996 void
3997 brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
3998 {
3999 /* Totally stop the timer */
4000 if (!wdtick && bus->wd_timer_valid) {
4001 del_timer_sync(&bus->timer);
4002 bus->wd_timer_valid = false;
4003 bus->save_ms = wdtick;
4004 return;
4005 }
4006
4007 /* don't start the wd until fw is loaded */
4008 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
4009 return;
4010
4011 if (wdtick) {
4012 if (bus->save_ms != BRCMF_WD_POLL_MS) {
4013 if (bus->wd_timer_valid)
4014 /* Stop timer and restart at new value */
4015 del_timer_sync(&bus->timer);
4016
4017 /* Create the timer again when the watchdog period is
4018  * changed dynamically, or on the first invocation
4019  */
4020 bus->timer.expires =
4021 jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
4022 add_timer(&bus->timer);
4023
4024 } else {
4025 /* Re-arm the timer with the last watchdog period */
4026 mod_timer(&bus->timer,
4027 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
4028 }
4029
4030 bus->wd_timer_valid = true;
4031 bus->save_ms = wdtick;
4032 }
4033 }