drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/printk.h>
21 #include <linux/pci_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/semaphore.h>
29 #include <linux/firmware.h>
30 #include <linux/module.h>
31 #include <linux/bcma/bcma.h>
32 #include <linux/debugfs.h>
33 #include <linux/vmalloc.h>
34 #include <linux/platform_data/brcmfmac-sdio.h>
35 #include <asm/unaligned.h>
36 #include <defs.h>
37 #include <brcmu_wifi.h>
38 #include <brcmu_utils.h>
39 #include <brcm_hw_ids.h>
40 #include <soc.h>
41 #include "sdio_host.h"
42 #include "sdio_chip.h"
43
44 #define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
45
46 #ifdef DEBUG
47
48 #define BRCMF_TRAP_INFO_SIZE 80
49
50 #define CBUF_LEN (128)
51
52 /* Device console log buffer state */
53 #define CONSOLE_BUFFER_MAX 2024
54
55 struct rte_log_le {
56 __le32 buf; /* Can't be pointer on (64-bit) hosts */
57 __le32 buf_size;
58 __le32 idx;
59 char *_buf_compat; /* Redundant pointer for backward compat. */
60 };
61
62 struct rte_console {
63 /* Virtual UART
64 * When there is no UART (e.g. Quickturn),
65 * the host should write a complete
66 * input line directly into cbuf and then write
67 * the length into vcons_in.
68 * This may also be used when there is a real UART
69 * (at risk of conflicting with
70 * the real UART). vcons_out is currently unused.
71 */
72 uint vcons_in;
73 uint vcons_out;
74
75 /* Output (logging) buffer
76 * Console output is written to a ring buffer log_buf at index log_idx.
77 * The host may read the output when it sees log_idx advance.
78 * Output will be lost if the output wraps around faster than the host
79 * polls.
80 */
81 struct rte_log_le log_le;
82
83 /* Console input line buffer
84 * Characters are read one at a time into cbuf
85 * until <CR> is received, then
86 * the buffer is processed as a command line.
87 * Also used for virtual UART.
88 */
89 uint cbuf_idx;
90 char cbuf[CBUF_LEN];
91 };
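/*
 * A rough host-side sketch (not the driver's actual console-poll code): the
 * host copies this structure out of dongle memory, compares log_le.idx with
 * the index it saw on the previous poll, and reads the newly written bytes
 * from the ring at log_le.buf, wrapping the index modulo log_le.buf_size.
 * As noted above, output is lost if the ring wraps faster than the host polls.
 */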
92
93 #endif /* DEBUG */
94 #include <chipcommon.h>
95
96 #include "dhd_bus.h"
97 #include "dhd_dbg.h"
98 #include "tracepoint.h"
99
100 #define TXQLEN 2048 /* bulk tx queue length */
101 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
102 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
103 #define PRIOMASK 7
104
105 #define TXRETRIES 2 /* # of retries for tx frames */
106
107 #define BRCMF_RXBOUND 50 /* Default for max rx frames in
108 one scheduling */
109
110 #define BRCMF_TXBOUND 20 /* Default for max tx frames in
111 one scheduling */
112
113 #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
114
115 #define MEMBLOCK 2048 /* Block size used for downloading
116 of dongle image */
117 #define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
118 biggest possible glom */
119
120 #define BRCMF_FIRSTREAD (1 << 6)
121
122
123 /* SBSDIO_DEVICE_CTL */
124
125 /* 1: device will assert busy signal when receiving CMD53 */
126 #define SBSDIO_DEVCTL_SETBUSY 0x01
127 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
128 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
129 /* 1: mask all interrupts to host except the chipActive (rev 8) */
130 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
131 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
132 * sdio bus power cycle to clear (rev 9) */
133 #define SBSDIO_DEVCTL_PADS_ISO 0x08
134 /* Force SD->SB reset mapping (rev 11) */
135 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
136 /* Determined by CoreControl bit */
137 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
138 /* Force backplane reset */
139 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
140 /* Force no backplane reset */
141 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
142
143 /* direct(mapped) cis space */
144
145 /* MAPPED common CIS address */
146 #define SBSDIO_CIS_BASE_COMMON 0x1000
147 /* maximum bytes in one CIS */
148 #define SBSDIO_CIS_SIZE_LIMIT 0x200
149 /* cis offset addr is < 17 bits */
150 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
151
152 /* manfid tuple length, include tuple, link bytes */
153 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
154
155 /* intstatus */
156 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
157 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
158 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
159 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
160 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
161 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
162 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
163 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
164 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
165 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
166 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
167 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
168 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
169 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
170 #define I_PC (1 << 10) /* descriptor error */
171 #define I_PD (1 << 11) /* data error */
172 #define I_DE (1 << 12) /* Descriptor protocol Error */
173 #define I_RU (1 << 13) /* Receive descriptor Underflow */
174 #define I_RO (1 << 14) /* Receive fifo Overflow */
175 #define I_XU (1 << 15) /* Transmit fifo Underflow */
176 #define I_RI (1 << 16) /* Receive Interrupt */
177 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
178 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
179 #define I_XI (1 << 24) /* Transmit Interrupt */
180 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
181 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
182 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
183 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
184 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
185 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
186 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
187 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
188 #define I_DMA (I_RI | I_XI | I_ERRORS)
189
190 /* corecontrol */
191 #define CC_CISRDY (1 << 0) /* CIS Ready */
192 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
193 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
194 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
195 #define CC_XMTDATAAVAIL_MODE (1 << 4)
196 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
197
198 /* SDA_FRAMECTRL */
199 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
200 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
201 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
202 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
203
204 /*
205 * Software allocation of To SB Mailbox resources
206 */
207
208 /* tosbmailbox bits corresponding to intstatus bits */
209 #define SMB_NAK (1 << 0) /* Frame NAK */
210 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
211 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
212 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
213
214 /* tosbmailboxdata */
215 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
216
217 /*
218 * Software allocation of To Host Mailbox resources
219 */
220
221 /* intstatus bits */
222 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
223 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
224 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
225 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
226
227 /* tohostmailboxdata */
228 #define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
229 #define HMB_DATA_DEVREADY 2 /* talk to host after enable */
230 #define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
231 #define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
232
233 #define HMB_DATA_FCDATA_MASK 0xff000000
234 #define HMB_DATA_FCDATA_SHIFT 24
235
236 #define HMB_DATA_VERSION_MASK 0x00ff0000
237 #define HMB_DATA_VERSION_SHIFT 16
238
239 /*
240 * Software-defined protocol header
241 */
242
243 /* Current protocol version */
244 #define SDPCM_PROT_VERSION 4
245
246 /*
247 * Shared structure between dongle and the host.
248 * The structure contains pointers to trap or assert information.
249 */
250 #define SDPCM_SHARED_VERSION 0x0003
251 #define SDPCM_SHARED_VERSION_MASK 0x00FF
252 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
253 #define SDPCM_SHARED_ASSERT 0x0200
254 #define SDPCM_SHARED_TRAP 0x0400
255
256 /* Space for header read, limit for data packets */
257 #define MAX_HDR_READ (1 << 6)
258 #define MAX_RX_DATASZ 2048
259
260 /* Maximum milliseconds to wait for F2 to come up */
261 #define BRCMF_WAIT_F2RDY 3000
262
263 /* Bump up limit on waiting for HT to account for first startup;
264 * if the image is doing a CRC calculation before programming the PMU
265 * for HT availability, it could take a couple hundred ms more, so
266 * max out at 1 second (1000000 us).
267 */
268 #undef PMU_MAX_TRANSITION_DLY
269 #define PMU_MAX_TRANSITION_DLY 1000000
270
271 /* Value for ChipClockCSR during initial setup */
272 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
273 SBSDIO_ALP_AVAIL_REQ)
274
275 /* Flags for SDH calls */
276 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
277
278 #define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
279 #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
280 * when idle
281 */
282 #define BRCMF_IDLE_INTERVAL 1
283
284 #define KSO_WAIT_US 50
285 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
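/*
 * With the values above this allows PMU_MAX_TRANSITION_DLY / KSO_WAIT_US =
 * 1000000 / 50 = 20000 attempts, i.e. roughly one second of polling in
 * KSO_WAIT_US steps before a KSO write is given up on.
 */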
286
287 /*
288 * Conversion of 802.1D priority to precedence level
289 */
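/*
 * Only PRIO_8021D_NONE and PRIO_8021D_BE are remapped: assuming the usual
 * 802.1D numbering (0 for best effort, 2 for the spare/none priority), the
 * XOR with 2 below swaps those two values (0 -> 2, 2 -> 0) so best-effort
 * traffic ends up with the higher precedence of the pair, while every other
 * priority maps straight through.
 */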
290 static uint prio2prec(u32 prio)
291 {
292 return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
293 (prio^2) : prio;
294 }
295
296 #ifdef DEBUG
297 /* Device console log buffer state */
298 struct brcmf_console {
299 uint count; /* Poll interval msec counter */
300 uint log_addr; /* Log struct address (fixed) */
301 struct rte_log_le log_le; /* Log struct (host copy) */
302 uint bufsize; /* Size of log buffer */
303 u8 *buf; /* Log buffer (host copy) */
304 uint last; /* Last buffer read index */
305 };
306
307 struct brcmf_trap_info {
308 __le32 type;
309 __le32 epc;
310 __le32 cpsr;
311 __le32 spsr;
312 __le32 r0; /* a1 */
313 __le32 r1; /* a2 */
314 __le32 r2; /* a3 */
315 __le32 r3; /* a4 */
316 __le32 r4; /* v1 */
317 __le32 r5; /* v2 */
318 __le32 r6; /* v3 */
319 __le32 r7; /* v4 */
320 __le32 r8; /* v5 */
321 __le32 r9; /* sb/v6 */
322 __le32 r10; /* sl/v7 */
323 __le32 r11; /* fp/v8 */
324 __le32 r12; /* ip */
325 __le32 r13; /* sp */
326 __le32 r14; /* lr */
327 __le32 pc; /* r15 */
328 };
329 #endif /* DEBUG */
330
331 struct sdpcm_shared {
332 u32 flags;
333 u32 trap_addr;
334 u32 assert_exp_addr;
335 u32 assert_file_addr;
336 u32 assert_line;
337 u32 console_addr; /* Address of struct rte_console */
338 u32 msgtrace_addr;
339 u8 tag[32];
340 u32 brpt_addr;
341 };
342
343 struct sdpcm_shared_le {
344 __le32 flags;
345 __le32 trap_addr;
346 __le32 assert_exp_addr;
347 __le32 assert_file_addr;
348 __le32 assert_line;
349 __le32 console_addr; /* Address of struct rte_console */
350 __le32 msgtrace_addr;
351 u8 tag[32];
352 __le32 brpt_addr;
353 };
354
355 /* dongle SDIO bus specific header info */
356 struct brcmf_sdio_hdrinfo {
357 u8 seq_num;
358 u8 channel;
359 u16 len;
360 u16 len_left;
361 u16 len_nxtfrm;
362 u8 dat_offset;
363 };
364
365 /* misc chip info needed by some of the routines */
366 /* Private data for SDIO bus interaction */
367 struct brcmf_sdio {
368 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
369 struct chip_info *ci; /* Chip info struct */
370 char *vars; /* Variables (from CIS and/or other) */
371 uint varsz; /* Size of variables buffer */
372
373 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
374
375 u32 hostintmask; /* Copy of Host Interrupt Mask */
376 atomic_t intstatus; /* Intstatus bits (events) pending */
377 atomic_t fcstate; /* State of dongle flow-control */
378
379 uint blocksize; /* Block size of SDIO transfers */
380 uint roundup; /* Max roundup limit */
381
382 struct pktq txq; /* Queue length used for flow-control */
383 u8 flowcontrol; /* per prio flow control bitmask */
384 u8 tx_seq; /* Transmit sequence number (next) */
385 u8 tx_max; /* Maximum transmit sequence allowed */
386
387 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
388 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
389 u8 rx_seq; /* Receive sequence number (expected) */
390 struct brcmf_sdio_hdrinfo cur_read;
391 /* info of current read frame */
392 bool rxskip; /* Skip receive (awaiting NAK ACK) */
393 bool rxpending; /* Data frame pending in dongle */
394
395 uint rxbound; /* Rx frames to read before resched */
396 uint txbound; /* Tx frames to send before resched */
397 uint txminmax;
398
399 struct sk_buff *glomd; /* Packet containing glomming descriptor */
400 struct sk_buff_head glom; /* Packet list for glommed superframe */
401 uint glomerr; /* Glom packet read errors */
402
403 u8 *rxbuf; /* Buffer for receiving control packets */
404 uint rxblen; /* Allocated length of rxbuf */
405 u8 *rxctl; /* Aligned pointer into rxbuf */
406 u8 *rxctl_orig; /* pointer for freeing rxctl */
407 uint rxlen; /* Length of valid data in buffer */
408 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
409
410 u8 sdpcm_ver; /* Bus protocol reported by dongle */
411
412 bool intr; /* Use interrupts */
413 bool poll; /* Use polling */
414 atomic_t ipend; /* Device interrupt is pending */
415 uint spurious; /* Count of spurious interrupts */
416 uint pollrate; /* Ticks between device polls */
417 uint polltick; /* Tick counter */
418
419 #ifdef DEBUG
420 uint console_interval;
421 struct brcmf_console console; /* Console output polling support */
422 uint console_addr; /* Console address from shared struct */
423 #endif /* DEBUG */
424
425 uint clkstate; /* State of sd and backplane clock(s) */
426 bool activity; /* Activity flag for clock down */
427 s32 idletime; /* Control for activity timeout */
428 s32 idlecount; /* Activity timeout counter */
429 s32 idleclock; /* How to set bus driver when idle */
430 bool rxflow_mode; /* Rx flow control mode */
431 bool rxflow; /* Is rx flow control on */
432 bool alp_only; /* Don't use HT clock (ALP only) */
433
434 u8 *ctrl_frame_buf;
435 u32 ctrl_frame_len;
436 bool ctrl_frame_stat;
437
438 spinlock_t txqlock;
439 wait_queue_head_t ctrl_wait;
440 wait_queue_head_t dcmd_resp_wait;
441
442 struct timer_list timer;
443 struct completion watchdog_wait;
444 struct task_struct *watchdog_tsk;
445 bool wd_timer_valid;
446 uint save_ms;
447
448 struct workqueue_struct *brcmf_wq;
449 struct work_struct datawork;
450 atomic_t dpc_tskcnt;
451
452 bool txoff; /* Transmit flow-controlled */
453 struct brcmf_sdio_count sdcnt;
454 bool sr_enabled; /* SaveRestore enabled */
455 bool sleeping; /* SDIO bus sleeping */
456
457 u8 tx_hdrlen; /* sdio bus header length for tx packet */
458 };
459
460 /* clkstate */
461 #define CLK_NONE 0
462 #define CLK_SDONLY 1
463 #define CLK_PENDING 2
464 #define CLK_AVAIL 3
465
466 #ifdef DEBUG
467 static int qcount[NUMPRIO];
468 #endif /* DEBUG */
469
470 #define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
471
472 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
473
474 /* Retry count for register access failures */
475 static const uint retry_limit = 2;
476
477 /* Limit on rounding up frames */
478 static const uint max_roundup = 512;
479
480 #define ALIGNMENT 4
481
482 enum brcmf_sdio_frmtype {
483 BRCMF_SDIO_FT_NORMAL,
484 BRCMF_SDIO_FT_SUPER,
485 BRCMF_SDIO_FT_SUB,
486 };
487
488 #define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
489 #define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
490 #define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
491 #define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
492 #define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
493 #define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
494 #define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
495 #define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
496 #define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
497 #define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
498 #define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
499 #define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
500 #define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
501 #define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
502
503 MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
504 MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
505 MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
506 MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
507 MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
508 MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
509 MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
510 MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
511 MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
512 MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
513 MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
514 MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
515 MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
516 MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
517
518 struct brcmf_firmware_names {
519 u32 chipid;
520 u32 revmsk;
521 const char *bin;
522 const char *nv;
523 };
524
525 enum brcmf_firmware_type {
526 BRCMF_FIRMWARE_BIN,
527 BRCMF_FIRMWARE_NVRAM
528 };
529
530 #define BRCMF_FIRMWARE_NVRAM(name) \
531 name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
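/* For example, BRCMF_FIRMWARE_NVRAM(BCM4329) expands to
 * BCM4329_FIRMWARE_NAME, BCM4329_NVRAM_NAME, i.e. the .bin/.txt pair defined
 * above, so each table entry below supplies both the firmware and NVRAM names.
 */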
532
533 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
534 { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
535 { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
536 { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
537 { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
538 { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
539 { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
540 { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
541 };
542
543
544 static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
545 enum brcmf_firmware_type type)
546 {
547 const struct firmware *fw;
548 const char *name;
549 int err, i;
550
551 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
552 if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
553 brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
554 switch (type) {
555 case BRCMF_FIRMWARE_BIN:
556 name = brcmf_fwname_data[i].bin;
557 break;
558 case BRCMF_FIRMWARE_NVRAM:
559 name = brcmf_fwname_data[i].nv;
560 break;
561 default:
562 brcmf_err("invalid firmware type (%d)\n", type);
563 return NULL;
564 }
565 goto found;
566 }
567 }
568 brcmf_err("Unknown chipid %d [%d]\n",
569 bus->ci->chip, bus->ci->chiprev);
570 return NULL;
571
572 found:
573 err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
574 if ((err) || (!fw)) {
575 brcmf_err("fail to request firmware %s (%d)\n", name, err);
576 return NULL;
577 }
578
579 return fw;
580 }
581
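/*
 * Advance the skb data pointer to the next 'align' byte boundary and trim the
 * packet to 'len'; bytes skipped at the front are simply discarded, which is
 * why callers allocate the packet with 'align' extra bytes of slack.
 */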
582 static void pkt_align(struct sk_buff *p, int len, int align)
583 {
584 uint datalign;
585 datalign = (unsigned long)(p->data);
586 datalign = roundup(datalign, (align)) - datalign;
587 if (datalign)
588 skb_pull(p, datalign);
589 __skb_trim(p, len);
590 }
591
592 /* To check if there's window offered */
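/*
 * tx_max is the highest sequence number the firmware currently allows and
 * tx_seq is the next one the host will use; the u8 subtraction below yields
 * the remaining window modulo 256.  A result of zero means no window, and a
 * result with bit 7 set means tx_seq has run past tx_max, so neither case
 * permits transmission.
 */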
593 static bool data_ok(struct brcmf_sdio *bus)
594 {
595 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
596 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
597 }
598
599 /*
600 * Reads a register in the SDIO hardware block. This block occupies a series of
601 * addresses on the 32-bit backplane bus.
602 */
603 static int
604 r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
605 {
606 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
607 int ret;
608
609 *regvar = brcmf_sdio_regrl(bus->sdiodev,
610 bus->ci->c_inf[idx].base + offset, &ret);
611
612 return ret;
613 }
614
615 static int
616 w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
617 {
618 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
619 int ret;
620
621 brcmf_sdio_regwl(bus->sdiodev,
622 bus->ci->c_inf[idx].base + reg_offset,
623 regval, &ret);
624
625 return ret;
626 }
627
628 static int
629 brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
630 {
631 u8 wr_val = 0, rd_val, cmp_val, bmask;
632 int err = 0;
633 int try_cnt = 0;
634
635 brcmf_dbg(TRACE, "Enter\n");
636
637 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
638 /* 1st KSO write goes to the AOS to wake up the core if the device is asleep */
639 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
640 wr_val, &err);
641 if (err) {
642 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
643 return err;
644 }
645
646 if (on) {
647 /* device WAKEUP through KSO:
648 * write bit 0 & read back until
649 * both bits 0 (kso bit) & 1 (dev on status) are set
650 */
651 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
652 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
653 bmask = cmp_val;
654 usleep_range(2000, 3000);
655 } else {
656 /* Put device to sleep, turn off KSO */
657 cmp_val = 0;
658 /* only check for bit0, bit1(dev on status) may not
659 * get cleared right away
660 */
661 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
662 }
663
664 do {
665 /* reliable KSO bit set/clr:
666 * the sdiod sleep write access is synced to PMU 32khz clk
667 * just one write attempt may fail,
668 * read it back until it matches written value
669 */
670 rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
671 &err);
672 if (((rd_val & bmask) == cmp_val) && !err)
673 break;
674 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
675 try_cnt, MAX_KSO_ATTEMPTS, err);
676 udelay(KSO_WAIT_US);
677 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
678 wr_val, &err);
679 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
680
681 return err;
682 }
683
684 #define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
685
686 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
687
688 /* Turn backplane clock on or off */
689 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
690 {
691 int err;
692 u8 clkctl, clkreq, devctl;
693 unsigned long timeout;
694
695 brcmf_dbg(SDIO, "Enter\n");
696
697 clkctl = 0;
698
699 if (bus->sr_enabled) {
700 bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
701 return 0;
702 }
703
704 if (on) {
705 /* Request HT Avail */
706 clkreq =
707 bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
708
709 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
710 clkreq, &err);
711 if (err) {
712 brcmf_err("HT Avail request error: %d\n", err);
713 return -EBADE;
714 }
715
716 /* Check current status */
717 clkctl = brcmf_sdio_regrb(bus->sdiodev,
718 SBSDIO_FUNC1_CHIPCLKCSR, &err);
719 if (err) {
720 brcmf_err("HT Avail read error: %d\n", err);
721 return -EBADE;
722 }
723
724 /* Go to pending and await interrupt if appropriate */
725 if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
726 /* Allow only clock-available interrupt */
727 devctl = brcmf_sdio_regrb(bus->sdiodev,
728 SBSDIO_DEVICE_CTL, &err);
729 if (err) {
730 brcmf_err("Devctl error setting CA: %d\n",
731 err);
732 return -EBADE;
733 }
734
735 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
736 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
737 devctl, &err);
738 brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
739 bus->clkstate = CLK_PENDING;
740
741 return 0;
742 } else if (bus->clkstate == CLK_PENDING) {
743 /* Cancel CA-only interrupt filter */
744 devctl = brcmf_sdio_regrb(bus->sdiodev,
745 SBSDIO_DEVICE_CTL, &err);
746 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
747 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
748 devctl, &err);
749 }
750
751 /* Otherwise, wait here (polling) for HT Avail */
752 timeout = jiffies +
753 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
754 while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
755 clkctl = brcmf_sdio_regrb(bus->sdiodev,
756 SBSDIO_FUNC1_CHIPCLKCSR,
757 &err);
758 if (time_after(jiffies, timeout))
759 break;
760 else
761 usleep_range(5000, 10000);
762 }
763 if (err) {
764 brcmf_err("HT Avail request error: %d\n", err);
765 return -EBADE;
766 }
767 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
768 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
769 PMU_MAX_TRANSITION_DLY, clkctl);
770 return -EBADE;
771 }
772
773 /* Mark clock available */
774 bus->clkstate = CLK_AVAIL;
775 brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
776
777 #if defined(DEBUG)
778 if (!bus->alp_only) {
779 if (SBSDIO_ALPONLY(clkctl))
780 brcmf_err("HT Clock should be on\n");
781 }
782 #endif /* defined (DEBUG) */
783
784 bus->activity = true;
785 } else {
786 clkreq = 0;
787
788 if (bus->clkstate == CLK_PENDING) {
789 /* Cancel CA-only interrupt filter */
790 devctl = brcmf_sdio_regrb(bus->sdiodev,
791 SBSDIO_DEVICE_CTL, &err);
792 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
793 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
794 devctl, &err);
795 }
796
797 bus->clkstate = CLK_SDONLY;
798 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
799 clkreq, &err);
800 brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
801 if (err) {
802 brcmf_err("Failed access turning clock off: %d\n",
803 err);
804 return -EBADE;
805 }
806 }
807 return 0;
808 }
809
810 /* Change idle/active SD state */
811 static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
812 {
813 brcmf_dbg(SDIO, "Enter\n");
814
815 if (on)
816 bus->clkstate = CLK_SDONLY;
817 else
818 bus->clkstate = CLK_NONE;
819
820 return 0;
821 }
822
823 /* Transition SD and backplane clock readiness */
824 static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
825 {
826 #ifdef DEBUG
827 uint oldstate = bus->clkstate;
828 #endif /* DEBUG */
829
830 brcmf_dbg(SDIO, "Enter\n");
831
832 /* Early exit if we're already there */
833 if (bus->clkstate == target) {
834 if (target == CLK_AVAIL) {
835 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
836 bus->activity = true;
837 }
838 return 0;
839 }
840
841 switch (target) {
842 case CLK_AVAIL:
843 /* Make sure SD clock is available */
844 if (bus->clkstate == CLK_NONE)
845 brcmf_sdbrcm_sdclk(bus, true);
846 /* Now request HT Avail on the backplane */
847 brcmf_sdbrcm_htclk(bus, true, pendok);
848 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
849 bus->activity = true;
850 break;
851
852 case CLK_SDONLY:
853 /* Remove HT request, or bring up SD clock */
854 if (bus->clkstate == CLK_NONE)
855 brcmf_sdbrcm_sdclk(bus, true);
856 else if (bus->clkstate == CLK_AVAIL)
857 brcmf_sdbrcm_htclk(bus, false, false);
858 else
859 brcmf_err("request for %d -> %d\n",
860 bus->clkstate, target);
861 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
862 break;
863
864 case CLK_NONE:
865 /* Make sure to remove HT request */
866 if (bus->clkstate == CLK_AVAIL)
867 brcmf_sdbrcm_htclk(bus, false, false);
868 /* Now remove the SD clock */
869 brcmf_sdbrcm_sdclk(bus, false);
870 brcmf_sdbrcm_wd_timer(bus, 0);
871 break;
872 }
873 #ifdef DEBUG
874 brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
875 #endif /* DEBUG */
876
877 return 0;
878 }
879
880 static int
881 brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
882 {
883 int err = 0;
884 brcmf_dbg(TRACE, "Enter\n");
885 brcmf_dbg(SDIO, "request %s currently %s\n",
886 (sleep ? "SLEEP" : "WAKE"),
887 (bus->sleeping ? "SLEEP" : "WAKE"));
888
889 /* If SR is enabled control bus state with KSO */
890 if (bus->sr_enabled) {
891 /* Done if we're already in the requested state */
892 if (sleep == bus->sleeping)
893 goto end;
894
895 /* Going to sleep */
896 if (sleep) {
897 /* Don't sleep if something is pending */
898 if (atomic_read(&bus->intstatus) ||
899 atomic_read(&bus->ipend) > 0 ||
900 (!atomic_read(&bus->fcstate) &&
901 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
902 data_ok(bus)))
903 return -EBUSY;
904 err = brcmf_sdbrcm_kso_control(bus, false);
905 /* disable watchdog */
906 if (!err)
907 brcmf_sdbrcm_wd_timer(bus, 0);
908 } else {
909 bus->idlecount = 0;
910 err = brcmf_sdbrcm_kso_control(bus, true);
911 }
912 if (!err) {
913 /* Change state */
914 bus->sleeping = sleep;
915 brcmf_dbg(SDIO, "new state %s\n",
916 (sleep ? "SLEEP" : "WAKE"));
917 } else {
918 brcmf_err("error while changing bus sleep state %d\n",
919 err);
920 return err;
921 }
922 }
923
924 end:
925 /* control clocks */
926 if (sleep) {
927 if (!bus->sr_enabled)
928 brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
929 } else {
930 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
931 }
932
933 return err;
934
935 }
936
937 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
938 {
939 u32 intstatus = 0;
940 u32 hmb_data;
941 u8 fcbits;
942 int ret;
943
944 brcmf_dbg(SDIO, "Enter\n");
945
946 /* Read mailbox data and ack that we did so */
947 ret = r_sdreg32(bus, &hmb_data,
948 offsetof(struct sdpcmd_regs, tohostmailboxdata));
949
950 if (ret == 0)
951 w_sdreg32(bus, SMB_INT_ACK,
952 offsetof(struct sdpcmd_regs, tosbmailbox));
953 bus->sdcnt.f1regdata += 2;
954
955 /* Dongle recomposed rx frames, accept them again */
956 if (hmb_data & HMB_DATA_NAKHANDLED) {
957 brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
958 bus->rx_seq);
959 if (!bus->rxskip)
960 brcmf_err("unexpected NAKHANDLED!\n");
961
962 bus->rxskip = false;
963 intstatus |= I_HMB_FRAME_IND;
964 }
965
966 /*
967 * DEVREADY does not occur with gSPI.
968 */
969 if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
970 bus->sdpcm_ver =
971 (hmb_data & HMB_DATA_VERSION_MASK) >>
972 HMB_DATA_VERSION_SHIFT;
973 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
974 brcmf_err("Version mismatch, dongle reports %d, "
975 "expecting %d\n",
976 bus->sdpcm_ver, SDPCM_PROT_VERSION);
977 else
978 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
979 bus->sdpcm_ver);
980 }
981
982 /*
983 * Flow control has been moved into the RX headers and this out-of-band
984 * method isn't used any more; it is handled here only to remain backward
985 * compatible with older dongles.
986 */
987 if (hmb_data & HMB_DATA_FC) {
988 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
989 HMB_DATA_FCDATA_SHIFT;
990
991 if (fcbits & ~bus->flowcontrol)
992 bus->sdcnt.fc_xoff++;
993
994 if (bus->flowcontrol & ~fcbits)
995 bus->sdcnt.fc_xon++;
996
997 bus->sdcnt.fc_rcvd++;
998 bus->flowcontrol = fcbits;
999 }
1000
1001 /* Shouldn't be any others */
1002 if (hmb_data & ~(HMB_DATA_DEVREADY |
1003 HMB_DATA_NAKHANDLED |
1004 HMB_DATA_FC |
1005 HMB_DATA_FWREADY |
1006 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1007 brcmf_err("Unknown mailbox data content: 0x%02x\n",
1008 hmb_data);
1009
1010 return intstatus;
1011 }
1012
1013 static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1014 {
1015 uint retries = 0;
1016 u16 lastrbc;
1017 u8 hi, lo;
1018 int err;
1019
1020 brcmf_err("%sterminate frame%s\n",
1021 abort ? "abort command, " : "",
1022 rtx ? ", send NAK" : "");
1023
1024 if (abort)
1025 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
1026
1027 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1028 SFC_RF_TERM, &err);
1029 bus->sdcnt.f1regdata++;
1030
1031 /* Wait until the packet has been flushed (device/FIFO stable) */
1032 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1033 hi = brcmf_sdio_regrb(bus->sdiodev,
1034 SBSDIO_FUNC1_RFRAMEBCHI, &err);
1035 lo = brcmf_sdio_regrb(bus->sdiodev,
1036 SBSDIO_FUNC1_RFRAMEBCLO, &err);
1037 bus->sdcnt.f1regdata += 2;
1038
1039 if ((hi == 0) && (lo == 0))
1040 break;
1041
1042 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1043 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1044 lastrbc, (hi << 8) + lo);
1045 }
1046 lastrbc = (hi << 8) + lo;
1047 }
1048
1049 if (!retries)
1050 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1051 else
1052 brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1053
1054 if (rtx) {
1055 bus->sdcnt.rxrtx++;
1056 err = w_sdreg32(bus, SMB_NAK,
1057 offsetof(struct sdpcmd_regs, tosbmailbox));
1058
1059 bus->sdcnt.f1regdata++;
1060 if (err == 0)
1061 bus->rxskip = true;
1062 }
1063
1064 /* Clear partial in any case */
1065 bus->cur_read.len = 0;
1066
1067 /* If we can't reach the device, signal failure */
1068 if (err)
1069 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1070 }
1071
1072 /* return total length of buffer chain */
1073 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1074 {
1075 struct sk_buff *p;
1076 uint total;
1077
1078 total = 0;
1079 skb_queue_walk(&bus->glom, p)
1080 total += p->len;
1081 return total;
1082 }
1083
1084 static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1085 {
1086 struct sk_buff *cur, *next;
1087
1088 skb_queue_walk_safe(&bus->glom, cur, next) {
1089 skb_unlink(cur, &bus->glom);
1090 brcmu_pkt_buf_free_skb(cur);
1091 }
1092 }
1093
1094 /**
1095 * brcmfmac sdio bus specific header
1096 * This is the lowest layer header wrapped on the packets transmitted between
1097 * host and WiFi dongle which contains information needed for SDIO core and
1098 * firmware
1099 *
1100 * It consists of 2 parts: hw header and software header
1101 * hardware header (frame tag) - 4 bytes
1102 * Byte 0~1: Frame length
1103 * Byte 2~3: Checksum, bit-wise inverse of frame length
1104 * software header - 8 bytes
1105 * Byte 0: Rx/Tx sequence number
1106 * Byte 1: 4 LSB Channel number, 4 MSB arbitrary flags
1107 * Byte 2: Length of next data frame, reserved for Tx
1108 * Byte 3: Data offset
1109 * Byte 4: Flow control bits, reserved for Tx
1110 * Byte 5: Maximum sequence number allowed by firmware for Tx; not used in Tx packets
1111 * Byte 6~7: Reserved
1112 */
1113 #define SDPCM_HWHDR_LEN 4
1114 #define SDPCM_SWHDR_LEN 8
1115 #define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1116 /* software header */
1117 #define SDPCM_SEQ_MASK 0x000000ff
1118 #define SDPCM_SEQ_WRAP 256
1119 #define SDPCM_CHANNEL_MASK 0x00000f00
1120 #define SDPCM_CHANNEL_SHIFT 8
1121 #define SDPCM_CONTROL_CHANNEL 0 /* Control */
1122 #define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
1123 #define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
1124 #define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
1125 #define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
1126 #define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
1127 #define SDPCM_NEXTLEN_MASK 0x00ff0000
1128 #define SDPCM_NEXTLEN_SHIFT 16
1129 #define SDPCM_DOFFSET_MASK 0xff000000
1130 #define SDPCM_DOFFSET_SHIFT 24
1131 #define SDPCM_FCMASK_MASK 0x000000ff
1132 #define SDPCM_WINDOW_MASK 0x0000ff00
1133 #define SDPCM_WINDOW_SHIFT 8
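/*
 * Worked example of decoding the 12-byte header described above, using
 * made-up byte values:  ec 00 13 ff  07 02 00 0c  00 00 00 00
 *   hw header: len = 0x00ec (236), check = 0xff13 = ~len, so the tag is valid
 *   sw header (first le32 = 0x0c000207): sequence 0x07, channel 2 (data),
 *   next-frame length 0, data offset 12, i.e. the payload starts right after
 *   this header; the second le32 carries the flow-control and window bytes.
 */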
1134
1135 static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1136 {
1137 u32 hdrvalue;
1138 hdrvalue = *(u32 *)swheader;
1139 return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1140 }
1141
1142 static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1143 struct brcmf_sdio_hdrinfo *rd,
1144 enum brcmf_sdio_frmtype type)
1145 {
1146 u16 len, checksum;
1147 u8 rx_seq, fc, tx_seq_max;
1148 u32 swheader;
1149
1150 trace_brcmf_sdpcm_hdr(false, header);
1151
1152 /* hw header */
1153 len = get_unaligned_le16(header);
1154 checksum = get_unaligned_le16(header + sizeof(u16));
1155 /* All zero means no more to read */
1156 if (!(len | checksum)) {
1157 bus->rxpending = false;
1158 return -ENODATA;
1159 }
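/* a valid hw header has checksum == ~len, so len ^ checksum must be 0xffff
 * and ~(len ^ checksum) truncated to 16 bits must be zero
 */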
1160 if ((u16)(~(len ^ checksum))) {
1161 brcmf_err("HW header checksum error\n");
1162 bus->sdcnt.rx_badhdr++;
1163 brcmf_sdbrcm_rxfail(bus, false, false);
1164 return -EIO;
1165 }
1166 if (len < SDPCM_HDRLEN) {
1167 brcmf_err("HW header length error\n");
1168 return -EPROTO;
1169 }
1170 if (type == BRCMF_SDIO_FT_SUPER &&
1171 (roundup(len, bus->blocksize) != rd->len)) {
1172 brcmf_err("HW superframe header length error\n");
1173 return -EPROTO;
1174 }
1175 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1176 brcmf_err("HW subframe header length error\n");
1177 return -EPROTO;
1178 }
1179 rd->len = len;
1180
1181 /* software header */
1182 header += SDPCM_HWHDR_LEN;
1183 swheader = le32_to_cpu(*(__le32 *)header);
1184 if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1185 brcmf_err("Glom descriptor found in superframe head\n");
1186 rd->len = 0;
1187 return -EINVAL;
1188 }
1189 rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1190 rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1191 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1192 type != BRCMF_SDIO_FT_SUPER) {
1193 brcmf_err("HW header length too long\n");
1194 bus->sdcnt.rx_toolong++;
1195 brcmf_sdbrcm_rxfail(bus, false, false);
1196 rd->len = 0;
1197 return -EPROTO;
1198 }
1199 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1200 brcmf_err("Wrong channel for superframe\n");
1201 rd->len = 0;
1202 return -EINVAL;
1203 }
1204 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1205 rd->channel != SDPCM_EVENT_CHANNEL) {
1206 brcmf_err("Wrong channel for subframe\n");
1207 rd->len = 0;
1208 return -EINVAL;
1209 }
1210 rd->dat_offset = brcmf_sdio_getdatoffset(header);
1211 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1212 brcmf_err("seq %d: bad data offset\n", rx_seq);
1213 bus->sdcnt.rx_badhdr++;
1214 brcmf_sdbrcm_rxfail(bus, false, false);
1215 rd->len = 0;
1216 return -ENXIO;
1217 }
1218 if (rd->seq_num != rx_seq) {
1219 brcmf_err("seq %d: sequence number error, expect %d\n",
1220 rx_seq, rd->seq_num);
1221 bus->sdcnt.rx_badseq++;
1222 rd->seq_num = rx_seq;
1223 }
1224 /* no need to check the rest for a subframe */
1225 if (type == BRCMF_SDIO_FT_SUB)
1226 return 0;
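/* the firmware reports the next frame's length in 16-byte units, hence the
 * << 4 scaling applied wherever len_nxtfrm is turned into a byte count
 */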
1227 rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1228 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1229 /* only warn for non-glom packets */
1230 if (rd->channel != SDPCM_GLOM_CHANNEL)
1231 brcmf_err("seq %d: next length error\n", rx_seq);
1232 rd->len_nxtfrm = 0;
1233 }
1234 swheader = le32_to_cpu(*(__le32 *)(header + 4));
1235 fc = swheader & SDPCM_FCMASK_MASK;
1236 if (bus->flowcontrol != fc) {
1237 if (~bus->flowcontrol & fc)
1238 bus->sdcnt.fc_xoff++;
1239 if (bus->flowcontrol & ~fc)
1240 bus->sdcnt.fc_xon++;
1241 bus->sdcnt.fc_rcvd++;
1242 bus->flowcontrol = fc;
1243 }
1244 tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1245 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1246 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1247 tx_seq_max = bus->tx_seq + 2;
1248 }
1249 bus->tx_max = tx_seq_max;
1250
1251 return 0;
1252 }
1253
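/* write the 4-byte hardware header (frame tag): the frame length followed by
 * its bit-wise inverse, which serves as the checksum verified in
 * brcmf_sdio_hdparse()
 */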
1254 static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1255 {
1256 *(__le16 *)header = cpu_to_le16(frm_length);
1257 *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1258 }
1259
1260 static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1261 struct brcmf_sdio_hdrinfo *hd_info)
1262 {
1263 u32 sw_header;
1264
1265 brcmf_sdio_update_hwhdr(header, hd_info->len);
1266
1267 sw_header = bus->tx_seq;
1268 sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1269 SDPCM_CHANNEL_MASK;
1270 sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1271 SDPCM_DOFFSET_MASK;
1272 *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
1273 *(((__le32 *)header) + 2) = 0;
1274 trace_brcmf_sdpcm_hdr(true, header);
1275 }
1276
1277 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1278 {
1279 u16 dlen, totlen;
1280 u8 *dptr, num = 0;
1281 u32 align = 0;
1282 u16 sublen;
1283 struct sk_buff *pfirst, *pnext;
1284
1285 int errcode;
1286 u8 doff, sfdoff;
1287
1288 struct brcmf_sdio_hdrinfo rd_new;
1289
1290 /* If packets, issue read(s) and send up packet chain */
1291 /* Return sequence numbers consumed? */
1292
1293 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1294 bus->glomd, skb_peek(&bus->glom));
1295
1296 if (bus->sdiodev->pdata)
1297 align = bus->sdiodev->pdata->sd_sgentry_align;
1298 if (align < 4)
1299 align = 4;
1300
1301 /* If there's a descriptor, generate the packet chain */
1302 if (bus->glomd) {
1303 pfirst = pnext = NULL;
1304 dlen = (u16) (bus->glomd->len);
1305 dptr = bus->glomd->data;
1306 if (!dlen || (dlen & 1)) {
1307 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1308 dlen);
1309 dlen = 0;
1310 }
1311
1312 for (totlen = num = 0; dlen; num++) {
1313 /* Get (and move past) next length */
1314 sublen = get_unaligned_le16(dptr);
1315 dlen -= sizeof(u16);
1316 dptr += sizeof(u16);
1317 if ((sublen < SDPCM_HDRLEN) ||
1318 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1319 brcmf_err("descriptor len %d bad: %d\n",
1320 num, sublen);
1321 pnext = NULL;
1322 break;
1323 }
1324 if (sublen % align) {
1325 brcmf_err("sublen %d not multiple of %d\n",
1326 sublen, align);
1327 }
1328 totlen += sublen;
1329
1330 /* For last frame, adjust read len so total
1331 is a block multiple */
1332 if (!dlen) {
1333 sublen +=
1334 (roundup(totlen, bus->blocksize) - totlen);
1335 totlen = roundup(totlen, bus->blocksize);
1336 }
1337
1338 /* Allocate/chain packet for next subframe */
1339 pnext = brcmu_pkt_buf_get_skb(sublen + align);
1340 if (pnext == NULL) {
1341 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1342 num, sublen);
1343 break;
1344 }
1345 skb_queue_tail(&bus->glom, pnext);
1346
1347 /* Adhere to start alignment requirements */
1348 pkt_align(pnext, sublen, align);
1349 }
1350
1351 /* If all allocations succeeded, save packet chain
1352 in bus structure */
1353 if (pnext) {
1354 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1355 totlen, num);
1356 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1357 totlen != bus->cur_read.len) {
1358 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1359 bus->cur_read.len, totlen, rxseq);
1360 }
1361 pfirst = pnext = NULL;
1362 } else {
1363 brcmf_sdbrcm_free_glom(bus);
1364 num = 0;
1365 }
1366
1367 /* Done with descriptor packet */
1368 brcmu_pkt_buf_free_skb(bus->glomd);
1369 bus->glomd = NULL;
1370 bus->cur_read.len = 0;
1371 }
1372
1373 /* Ok -- either we just generated a packet chain,
1374 or had one from before */
1375 if (!skb_queue_empty(&bus->glom)) {
1376 if (BRCMF_GLOM_ON()) {
1377 brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1378 skb_queue_walk(&bus->glom, pnext) {
1379 brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
1380 pnext, (u8 *) (pnext->data),
1381 pnext->len, pnext->len);
1382 }
1383 }
1384
1385 pfirst = skb_peek(&bus->glom);
1386 dlen = (u16) brcmf_sdbrcm_glom_len(bus);
1387
1388 /* Do an SDIO read for the superframe. Configurable iovar to
1389 * read directly into the chained packet, or allocate a large
1390 * packet and copy into the chain.
1391 */
1392 sdio_claim_host(bus->sdiodev->func[1]);
1393 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1394 bus->sdiodev->sbwad,
1395 SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
1396 sdio_release_host(bus->sdiodev->func[1]);
1397 bus->sdcnt.f2rxdata++;
1398
1399 /* On failure, kill the superframe, allow a couple retries */
1400 if (errcode < 0) {
1401 brcmf_err("glom read of %d bytes failed: %d\n",
1402 dlen, errcode);
1403
1404 sdio_claim_host(bus->sdiodev->func[1]);
1405 if (bus->glomerr++ < 3) {
1406 brcmf_sdbrcm_rxfail(bus, true, true);
1407 } else {
1408 bus->glomerr = 0;
1409 brcmf_sdbrcm_rxfail(bus, true, false);
1410 bus->sdcnt.rxglomfail++;
1411 brcmf_sdbrcm_free_glom(bus);
1412 }
1413 sdio_release_host(bus->sdiodev->func[1]);
1414 return 0;
1415 }
1416
1417 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1418 pfirst->data, min_t(int, pfirst->len, 48),
1419 "SUPERFRAME:\n");
1420
1421 rd_new.seq_num = rxseq;
1422 rd_new.len = dlen;
1423 sdio_claim_host(bus->sdiodev->func[1]);
1424 errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1425 BRCMF_SDIO_FT_SUPER);
1426 sdio_release_host(bus->sdiodev->func[1]);
1427 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1428
1429 /* Remove superframe header, remember offset */
1430 skb_pull(pfirst, rd_new.dat_offset);
1431 sfdoff = rd_new.dat_offset;
1432 num = 0;
1433
1434 /* Validate all the subframe headers */
1435 skb_queue_walk(&bus->glom, pnext) {
1436 /* leave when invalid subframe is found */
1437 if (errcode)
1438 break;
1439
1440 rd_new.len = pnext->len;
1441 rd_new.seq_num = rxseq++;
1442 sdio_claim_host(bus->sdiodev->func[1]);
1443 errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1444 BRCMF_SDIO_FT_SUB);
1445 sdio_release_host(bus->sdiodev->func[1]);
1446 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1447 pnext->data, 32, "subframe:\n");
1448
1449 num++;
1450 }
1451
1452 if (errcode) {
1453 /* Terminate frame on error, request
1454 a couple retries */
1455 sdio_claim_host(bus->sdiodev->func[1]);
1456 if (bus->glomerr++ < 3) {
1457 /* Restore superframe header space */
1458 skb_push(pfirst, sfdoff);
1459 brcmf_sdbrcm_rxfail(bus, true, true);
1460 } else {
1461 bus->glomerr = 0;
1462 brcmf_sdbrcm_rxfail(bus, true, false);
1463 bus->sdcnt.rxglomfail++;
1464 brcmf_sdbrcm_free_glom(bus);
1465 }
1466 sdio_release_host(bus->sdiodev->func[1]);
1467 bus->cur_read.len = 0;
1468 return 0;
1469 }
1470
1471 /* Basic SD framing looks ok - process each packet (header) */
1472
1473 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1474 dptr = (u8 *) (pfirst->data);
1475 sublen = get_unaligned_le16(dptr);
1476 doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1477
1478 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1479 dptr, pfirst->len,
1480 "Rx Subframe Data:\n");
1481
1482 __skb_trim(pfirst, sublen);
1483 skb_pull(pfirst, doff);
1484
1485 if (pfirst->len == 0) {
1486 skb_unlink(pfirst, &bus->glom);
1487 brcmu_pkt_buf_free_skb(pfirst);
1488 continue;
1489 }
1490
1491 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1492 pfirst->data,
1493 min_t(int, pfirst->len, 32),
1494 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1495 bus->glom.qlen, pfirst, pfirst->data,
1496 pfirst->len, pfirst->next,
1497 pfirst->prev);
1498 skb_unlink(pfirst, &bus->glom);
1499 brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1500 bus->sdcnt.rxglompkts++;
1501 }
1502
1503 bus->sdcnt.rxglomframes++;
1504 }
1505 return num;
1506 }
1507
1508 static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1509 bool *pending)
1510 {
1511 DECLARE_WAITQUEUE(wait, current);
1512 int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1513
1514 /* Wait until control frame is available */
1515 add_wait_queue(&bus->dcmd_resp_wait, &wait);
1516 set_current_state(TASK_INTERRUPTIBLE);
1517
1518 while (!(*condition) && (!signal_pending(current) && timeout))
1519 timeout = schedule_timeout(timeout);
1520
1521 if (signal_pending(current))
1522 *pending = true;
1523
1524 set_current_state(TASK_RUNNING);
1525 remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1526
1527 return timeout;
1528 }
1529
1530 static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1531 {
1532 if (waitqueue_active(&bus->dcmd_resp_wait))
1533 wake_up_interruptible(&bus->dcmd_resp_wait);
1534
1535 return 0;
1536 }
1537 static void
1538 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1539 {
1540 uint rdlen, pad;
1541 u8 *buf = NULL, *rbuf;
1542 int sdret;
1543
1544 brcmf_dbg(TRACE, "Enter\n");
1545
1546 if (bus->rxblen)
1547 buf = vzalloc(bus->rxblen);
1548 if (!buf)
1549 goto done;
1550
1551 rbuf = bus->rxbuf;
1552 pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
1553 if (pad)
1554 rbuf += (BRCMF_SDALIGN - pad);
1555
1556 /* Copy the already-read portion over */
1557 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1558 if (len <= BRCMF_FIRSTREAD)
1559 goto gotpkt;
1560
1561 /* Raise rdlen to next SDIO block to avoid tail command */
1562 rdlen = len - BRCMF_FIRSTREAD;
1563 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1564 pad = bus->blocksize - (rdlen % bus->blocksize);
1565 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1566 ((len + pad) < bus->sdiodev->bus_if->maxctl))
1567 rdlen += pad;
1568 } else if (rdlen % BRCMF_SDALIGN) {
1569 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1570 }
1571
1572 /* Satisfy length-alignment requirements */
1573 if (rdlen & (ALIGNMENT - 1))
1574 rdlen = roundup(rdlen, ALIGNMENT);
1575
1576 /* Drop if the read is too big or it exceeds our maximum */
1577 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1578 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1579 rdlen, bus->sdiodev->bus_if->maxctl);
1580 brcmf_sdbrcm_rxfail(bus, false, false);
1581 goto done;
1582 }
1583
1584 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1585 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1586 len, len - doff, bus->sdiodev->bus_if->maxctl);
1587 bus->sdcnt.rx_toolong++;
1588 brcmf_sdbrcm_rxfail(bus, false, false);
1589 goto done;
1590 }
1591
1592 /* Read the remainder of the frame body */
1593 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1594 bus->sdiodev->sbwad,
1595 SDIO_FUNC_2,
1596 F2SYNC, rbuf, rdlen);
1597 bus->sdcnt.f2rxdata++;
1598
1599 /* Control frame failures need retransmission */
1600 if (sdret < 0) {
1601 brcmf_err("read %d control bytes failed: %d\n",
1602 rdlen, sdret);
1603 bus->sdcnt.rxc_errors++;
1604 brcmf_sdbrcm_rxfail(bus, true, true);
1605 goto done;
1606 } else
1607 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1608
1609 gotpkt:
1610
1611 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1612 buf, len, "RxCtrl:\n");
1613
1614 /* Point to valid data and indicate its length */
1615 spin_lock_bh(&bus->rxctl_lock);
1616 if (bus->rxctl) {
1617 brcmf_err("last control frame is being processed.\n");
1618 spin_unlock_bh(&bus->rxctl_lock);
1619 vfree(buf);
1620 goto done;
1621 }
1622 bus->rxctl = buf + doff;
1623 bus->rxctl_orig = buf;
1624 bus->rxlen = len - doff;
1625 spin_unlock_bh(&bus->rxctl_lock);
1626
1627 done:
1628 /* Wake any waiters */
1629 brcmf_sdbrcm_dcmd_resp_wake(bus);
1630 }
1631
1632 /* Pad read to blocksize for efficiency */
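/*
 * Example (illustrative numbers): with a 512-byte block size and rdlen 700,
 * pad becomes 512 - (700 % 512) = 324; provided that fits within the
 * configured roundup limit and keeps the total under MAX_RX_DATASZ, rdlen is
 * raised to 1024 so the read ends on a block boundary instead of needing a
 * tail command.
 */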
1633 static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1634 {
1635 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1636 *pad = bus->blocksize - (*rdlen % bus->blocksize);
1637 if (*pad <= bus->roundup && *pad < bus->blocksize &&
1638 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1639 *rdlen += *pad;
1640 } else if (*rdlen % BRCMF_SDALIGN) {
1641 *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
1642 }
1643 }
1644
1645 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1646 {
1647 struct sk_buff *pkt; /* Packet for event or data frames */
1648 u16 pad; /* Number of pad bytes to read */
1649 uint rxleft = 0; /* Remaining number of frames allowed */
1650 int ret; /* Return code from calls */
1651 uint rxcount = 0; /* Total frames read */
1652 struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1653 u8 head_read = 0;
1654
1655 brcmf_dbg(TRACE, "Enter\n");
1656
1657 /* Not finished until we encounter a no-more-frames indication */
1658 bus->rxpending = true;
1659
1660 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1661 !bus->rxskip && rxleft &&
1662 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1663 rd->seq_num++, rxleft--) {
1664
1665 /* Handle glomming separately */
1666 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1667 u8 cnt;
1668 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1669 bus->glomd, skb_peek(&bus->glom));
1670 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1671 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1672 rd->seq_num += cnt - 1;
1673 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1674 continue;
1675 }
1676
1677 rd->len_left = rd->len;
1678 /* read the header first when the frame length is unknown */
1679 sdio_claim_host(bus->sdiodev->func[1]);
1680 if (!rd->len) {
1681 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1682 bus->sdiodev->sbwad,
1683 SDIO_FUNC_2, F2SYNC,
1684 bus->rxhdr,
1685 BRCMF_FIRSTREAD);
1686 bus->sdcnt.f2rxhdrs++;
1687 if (ret < 0) {
1688 brcmf_err("RXHEADER FAILED: %d\n",
1689 ret);
1690 bus->sdcnt.rx_hdrfail++;
1691 brcmf_sdbrcm_rxfail(bus, true, true);
1692 sdio_release_host(bus->sdiodev->func[1]);
1693 continue;
1694 }
1695
1696 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1697 bus->rxhdr, SDPCM_HDRLEN,
1698 "RxHdr:\n");
1699
1700 if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1701 BRCMF_SDIO_FT_NORMAL)) {
1702 sdio_release_host(bus->sdiodev->func[1]);
1703 if (!bus->rxpending)
1704 break;
1705 else
1706 continue;
1707 }
1708
1709 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1710 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1711 rd->len,
1712 rd->dat_offset);
1713 /* prepare the descriptor for the next read */
1714 rd->len = rd->len_nxtfrm << 4;
1715 rd->len_nxtfrm = 0;
1716 /* treat all packets as events if we don't know */
1717 rd->channel = SDPCM_EVENT_CHANNEL;
1718 sdio_release_host(bus->sdiodev->func[1]);
1719 continue;
1720 }
1721 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1722 rd->len - BRCMF_FIRSTREAD : 0;
1723 head_read = BRCMF_FIRSTREAD;
1724 }
1725
1726 brcmf_pad(bus, &pad, &rd->len_left);
1727
1728 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1729 BRCMF_SDALIGN);
1730 if (!pkt) {
1731 /* Give up on data, request rtx of events */
1732 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1733 brcmf_sdbrcm_rxfail(bus, false,
1734 RETRYCHAN(rd->channel));
1735 sdio_release_host(bus->sdiodev->func[1]);
1736 continue;
1737 }
1738 skb_pull(pkt, head_read);
1739 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1740
1741 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1742 SDIO_FUNC_2, F2SYNC, pkt);
1743 bus->sdcnt.f2rxdata++;
1744 sdio_release_host(bus->sdiodev->func[1]);
1745
1746 if (ret < 0) {
1747 brcmf_err("read %d bytes from channel %d failed: %d\n",
1748 rd->len, rd->channel, ret);
1749 brcmu_pkt_buf_free_skb(pkt);
1750 sdio_claim_host(bus->sdiodev->func[1]);
1751 brcmf_sdbrcm_rxfail(bus, true,
1752 RETRYCHAN(rd->channel));
1753 sdio_release_host(bus->sdiodev->func[1]);
1754 continue;
1755 }
1756
1757 if (head_read) {
1758 skb_push(pkt, head_read);
1759 memcpy(pkt->data, bus->rxhdr, head_read);
1760 head_read = 0;
1761 } else {
1762 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1763 rd_new.seq_num = rd->seq_num;
1764 sdio_claim_host(bus->sdiodev->func[1]);
1765 if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
1766 BRCMF_SDIO_FT_NORMAL)) {
1767 rd->len = 0;
1768 brcmu_pkt_buf_free_skb(pkt);
1769 }
1770 bus->sdcnt.rx_readahead_cnt++;
1771 if (rd->len != roundup(rd_new.len, 16)) {
1772 brcmf_err("frame length mismatch:read %d, should be %d\n",
1773 rd->len,
1774 roundup(rd_new.len, 16) >> 4);
1775 rd->len = 0;
1776 brcmf_sdbrcm_rxfail(bus, true, true);
1777 sdio_release_host(bus->sdiodev->func[1]);
1778 brcmu_pkt_buf_free_skb(pkt);
1779 continue;
1780 }
1781 sdio_release_host(bus->sdiodev->func[1]);
1782 rd->len_nxtfrm = rd_new.len_nxtfrm;
1783 rd->channel = rd_new.channel;
1784 rd->dat_offset = rd_new.dat_offset;
1785
1786 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1787 BRCMF_DATA_ON()) &&
1788 BRCMF_HDRS_ON(),
1789 bus->rxhdr, SDPCM_HDRLEN,
1790 "RxHdr:\n");
1791
1792 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1793 brcmf_err("readahead on control packet %d?\n",
1794 rd_new.seq_num);
1795 /* Force retry w/normal header read */
1796 rd->len = 0;
1797 sdio_claim_host(bus->sdiodev->func[1]);
1798 brcmf_sdbrcm_rxfail(bus, false, true);
1799 sdio_release_host(bus->sdiodev->func[1]);
1800 brcmu_pkt_buf_free_skb(pkt);
1801 continue;
1802 }
1803 }
1804
1805 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1806 pkt->data, rd->len, "Rx Data:\n");
1807
1808 /* Save superframe descriptor and allocate packet frame */
1809 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1810 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
1811 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1812 rd->len);
1813 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1814 pkt->data, rd->len,
1815 "Glom Data:\n");
1816 __skb_trim(pkt, rd->len);
1817 skb_pull(pkt, SDPCM_HDRLEN);
1818 bus->glomd = pkt;
1819 } else {
1820 brcmf_err("%s: glom superframe w/o "
1821 "descriptor!\n", __func__);
1822 sdio_claim_host(bus->sdiodev->func[1]);
1823 brcmf_sdbrcm_rxfail(bus, false, false);
1824 sdio_release_host(bus->sdiodev->func[1]);
1825 }
1826 /* prepare the descriptor for the next read */
1827 rd->len = rd->len_nxtfrm << 4;
1828 rd->len_nxtfrm = 0;
1829 /* treat all packets as events if the channel is unknown */
1830 rd->channel = SDPCM_EVENT_CHANNEL;
1831 continue;
1832 }
1833
1834 /* Fill in packet len and prio, deliver upward */
1835 __skb_trim(pkt, rd->len);
1836 skb_pull(pkt, rd->dat_offset);
1837
1838 /* prepare the descriptor for the next read */
1839 rd->len = rd->len_nxtfrm << 4;
1840 rd->len_nxtfrm = 0;
1841 /* treat all packets as events if the channel is unknown */
1842 rd->channel = SDPCM_EVENT_CHANNEL;
1843
1844 if (pkt->len == 0) {
1845 brcmu_pkt_buf_free_skb(pkt);
1846 continue;
1847 }
1848
1849 brcmf_rx_frame(bus->sdiodev->dev, pkt);
1850 }
1851
1852 rxcount = maxframes - rxleft;
1853 /* Message if we hit the limit */
1854 if (!rxleft)
1855 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
1856 else
1857 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
1858 /* Back off rxseq if awaiting rtx, update rx_seq */
1859 if (bus->rxskip)
1860 rd->seq_num--;
1861 bus->rx_seq = rd->seq_num;
1862
1863 return rxcount;
1864 }
1865
1866 static void
1867 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1868 {
1869 if (waitqueue_active(&bus->ctrl_wait))
1870 wake_up_interruptible(&bus->ctrl_wait);
1871 return;
1872 }
1873
1874 /**
1875 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
1876 * bus layer usage.
1877 */
1878 /* flag marking a dummy skb added for DMA alignment requirement */
1879 #define ALIGN_SKB_FLAG 0x8000
1880 /* bit mask of data length chopped from the previous packet */
1881 #define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
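/*
 * Illustrative example (values hypothetical): if 12 unaligned tail bytes are
 * chopped off a packet to satisfy scatter-gather alignment, the dummy pad skb
 * that carries them records
 *	*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + 12;
 * so that brcmf_sdio_txpkt_postp() can copy the 12 bytes back onto the
 * original packet and free the dummy skb once the transfer is done.
 */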
1882
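/*
 * brcmf_sdio_txpkt_prep_sg() - pad a packet for a scatter-gather transfer.
 *
 * Rounds the packet up to the sg entry alignment and the F2 block size,
 * either by appending tail padding in place or, when tailroom is
 * insufficient, by moving the unaligned tail bytes into a separately queued
 * dummy skb.
 * Return: the frame data length (excluding tail padding) to advertise in the
 * header, or -ENOMEM on allocation failure.
 */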
1883 static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
1884 struct sk_buff_head *pktq,
1885 struct sk_buff *pkt, uint chan)
1886 {
1887 struct sk_buff *pkt_pad;
1888 u16 tail_pad, tail_chop, sg_align;
1889 unsigned int blksize;
1890 u8 *dat_buf;
1891 int ntail;
1892
1893 blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
1894 sg_align = 4;
1895 if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
1896 sg_align = sdiodev->pdata->sd_sgentry_align;
1897 /* sg entry alignment should be a divisor of block size */
1898 WARN_ON(blksize % sg_align);
1899
1900 /* Check tail padding */
1901 pkt_pad = NULL;
1902 tail_chop = pkt->len % sg_align;
1903 tail_pad = sg_align - tail_chop;
1904 tail_pad += blksize - (pkt->len + tail_pad) % blksize;
1905 if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
1906 pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
1907 if (pkt_pad == NULL)
1908 return -ENOMEM;
1909 memcpy(pkt_pad->data,
1910 pkt->data + pkt->len - tail_chop,
1911 tail_chop);
1912 *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
1913 skb_trim(pkt, pkt->len - tail_chop);
1914 __skb_queue_after(pktq, pkt, pkt_pad);
1915 } else {
1916 ntail = pkt->data_len + tail_pad -
1917 (pkt->end - pkt->tail);
1918 if (skb_cloned(pkt) || ntail > 0)
1919 if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
1920 return -ENOMEM;
1921 if (skb_linearize(pkt))
1922 return -ENOMEM;
1923 dat_buf = (u8 *)(pkt->data);
1924 __skb_put(pkt, tail_pad);
1925 }
1926
1927 if (pkt_pad)
1928 return pkt->len + tail_chop;
1929 else
1930 return pkt->len - tail_pad;
1931 }
1932
1933 /**
1934 * brcmf_sdio_txpkt_prep - packet preparation for transmit
1935 * @bus: brcmf_sdio structure pointer
1936 * @pktq: packet list pointer
1937 * @chan: virtual channel to transmit the packet
1938 *
1939 * Processing applied to the packet:
1940 *	- align the data buffer pointer
1941 *	- align the data buffer length
1942 *	- prepare the header
1943 * Return: 0 on success, negative errno on failure
1944 */
1945 static int
1946 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
1947 uint chan)
1948 {
1949 u16 head_pad, head_align;
1950 struct sk_buff *pkt_next;
1951 u8 *dat_buf;
1952 int err;
1953 struct brcmf_sdio_hdrinfo hd_info = {0};
1954
1955 /* SDIO ADMA requires at least 32 bit alignment */
1956 head_align = 4;
1957 if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
1958 head_align = bus->sdiodev->pdata->sd_head_align;
1959
1960 pkt_next = pktq->next;
1961 dat_buf = (u8 *)(pkt_next->data);
1962
1963 /* Check head padding */
1964 head_pad = ((unsigned long)dat_buf % head_align);
1965 if (head_pad) {
1966 if (skb_headroom(pkt_next) < head_pad) {
1967 bus->sdiodev->bus_if->tx_realloc++;
1968 head_pad = 0;
1969 if (skb_cow(pkt_next, head_pad))
1970 return -ENOMEM;
1971 }
1972 skb_push(pkt_next, head_pad);
1973 dat_buf = (u8 *)(pkt_next->data);
1974 memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
1975 }
1976
1977 if (bus->sdiodev->sg_support && pktq->qlen > 1) {
1978 err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
1979 pkt_next, chan);
1980 if (err < 0)
1981 return err;
1982 hd_info.len = (u16)err;
1983 } else {
1984 hd_info.len = pkt_next->len;
1985 }
1986
1987 hd_info.channel = chan;
1988 hd_info.dat_offset = head_pad + bus->tx_hdrlen;
1989
1990 /* Now fill the header */
1991 brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
1992
1993 if (BRCMF_BYTES_ON() &&
1994 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
1995 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
1996 brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
1997 else if (BRCMF_HDRS_ON())
1998 brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
1999 "Tx Header:\n");
2000
2001 return 0;
2002 }
2003
2004 /**
2005 * brcmf_sdio_txpkt_postp - packet post processing for transmit
2006 * @bus: brcmf_sdio structure pointer
2007 * @pktq: packet list pointer
2008 *
2009 * Processing applied to the packet:
2010 *	- remove head padding
2011 *	- remove tail padding
2012 */
2013 static void
2014 brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2015 {
2016 u8 *hdr;
2017 u32 dat_offset;
2018 u32 dummy_flags, chop_len;
2019 struct sk_buff *pkt_next, *tmp, *pkt_prev;
2020
2021 skb_queue_walk_safe(pktq, pkt_next, tmp) {
2022 dummy_flags = *(u32 *)(pkt_next->cb);
2023 if (dummy_flags & ALIGN_SKB_FLAG) {
2024 chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2025 if (chop_len) {
2026 pkt_prev = pkt_next->prev;
2027 memcpy(pkt_prev->data + pkt_prev->len,
2028 pkt_next->data, chop_len);
2029 skb_put(pkt_prev, chop_len);
2030 }
2031 __skb_unlink(pkt_next, pktq);
2032 brcmu_pkt_buf_free_skb(pkt_next);
2033 } else {
2034 hdr = pkt_next->data + SDPCM_HWHDR_LEN;
2035 dat_offset = le32_to_cpu(*(__le32 *)hdr);
2036 dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2037 SDPCM_DOFFSET_SHIFT;
2038 skb_pull(pkt_next, dat_offset);
2039 }
2040 }
2041 }
2042
2043 /* Writes a HW/SW header into the packet and sends it. */
2044 /* Assumes: (a) header space already there, (b) caller holds lock */
2045 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2046 uint chan)
2047 {
2048 int ret;
2049 int i;
2050 struct sk_buff_head localq;
2051
2052 brcmf_dbg(TRACE, "Enter\n");
2053
2054 __skb_queue_head_init(&localq);
2055 __skb_queue_tail(&localq, pkt);
2056 ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
2057 if (ret)
2058 goto done;
2059
2060 sdio_claim_host(bus->sdiodev->func[1]);
2061 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2062 SDIO_FUNC_2, F2SYNC, &localq);
2063 bus->sdcnt.f2txdata++;
2064
2065 if (ret < 0) {
2066 /* On failure, abort the command and terminate the frame */
2067 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2068 ret);
2069 bus->sdcnt.tx_sderrs++;
2070
2071 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2072 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2073 SFC_WF_TERM, NULL);
2074 bus->sdcnt.f1regdata++;
2075
2076 for (i = 0; i < 3; i++) {
2077 u8 hi, lo;
2078 hi = brcmf_sdio_regrb(bus->sdiodev,
2079 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2080 lo = brcmf_sdio_regrb(bus->sdiodev,
2081 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2082 bus->sdcnt.f1regdata += 2;
2083 if ((hi == 0) && (lo == 0))
2084 break;
2085 }
2086
2087 }
2088 sdio_release_host(bus->sdiodev->func[1]);
2089 if (ret == 0)
2090 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2091
2092 done:
2093 brcmf_sdio_txpkt_postp(bus, &localq);
2094 __skb_dequeue_tail(&localq);
2095 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
2096 return ret;
2097 }
2098
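/*
 * Dequeue frames from the precedence-ordered tx queue and transmit up to
 * maxframes of them on the data channel. When interrupts are disabled (poll
 * mode) the device interrupt status is sampled between frames so pending
 * events are not missed, and the network stack is un-throttled once the
 * queue drains below TXLOW.
 */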
2099 static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2100 {
2101 struct sk_buff *pkt;
2102 u32 intstatus = 0;
2103 int ret = 0, prec_out;
2104 uint cnt = 0;
2105 u8 tx_prec_map;
2106
2107 brcmf_dbg(TRACE, "Enter\n");
2108
2109 tx_prec_map = ~bus->flowcontrol;
2110
2111 /* Send frames until the limit or some other event */
2112 for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
2113 spin_lock_bh(&bus->txqlock);
2114 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
2115 if (pkt == NULL) {
2116 spin_unlock_bh(&bus->txqlock);
2117 break;
2118 }
2119 spin_unlock_bh(&bus->txqlock);
2120
2121 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
2122
2123 /* In poll mode, need to check for other events */
2124 if (!bus->intr && cnt) {
2125 /* Check device status, signal pending interrupt */
2126 sdio_claim_host(bus->sdiodev->func[1]);
2127 ret = r_sdreg32(bus, &intstatus,
2128 offsetof(struct sdpcmd_regs,
2129 intstatus));
2130 sdio_release_host(bus->sdiodev->func[1]);
2131 bus->sdcnt.f2txdata++;
2132 if (ret != 0)
2133 break;
2134 if (intstatus & bus->hostintmask)
2135 atomic_set(&bus->ipend, 1);
2136 }
2137 }
2138
2139 /* Deflow-control stack if needed */
2140 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
2141 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2142 bus->txoff = false;
2143 brcmf_txflowblock(bus->sdiodev->dev, false);
2144 }
2145
2146 return cnt;
2147 }
2148
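/*
 * Bring the bus down: stop the watchdog thread, mask and clear the chip
 * interrupts, disable the F2 function, drop back to an SD-only clock and
 * flush the queued tx packets, glom state and pending rx control data.
 */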
2149 static void brcmf_sdbrcm_bus_stop(struct device *dev)
2150 {
2151 u32 local_hostintmask;
2152 u8 saveclk;
2153 int err;
2154 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2155 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2156 struct brcmf_sdio *bus = sdiodev->bus;
2157
2158 brcmf_dbg(TRACE, "Enter\n");
2159
2160 if (bus->watchdog_tsk) {
2161 send_sig(SIGTERM, bus->watchdog_tsk, 1);
2162 kthread_stop(bus->watchdog_tsk);
2163 bus->watchdog_tsk = NULL;
2164 }
2165
2166 sdio_claim_host(bus->sdiodev->func[1]);
2167
2168 /* Enable clock for device interrupts */
2169 brcmf_sdbrcm_bus_sleep(bus, false, false);
2170
2171 /* Disable and clear interrupts at the chip level also */
2172 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2173 local_hostintmask = bus->hostintmask;
2174 bus->hostintmask = 0;
2175
2176 /* Change our idea of bus state */
2177 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2178
2179 /* Force clocks on backplane to be sure F2 interrupt propagates */
2180 saveclk = brcmf_sdio_regrb(bus->sdiodev,
2181 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2182 if (!err) {
2183 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2184 (saveclk | SBSDIO_FORCE_HT), &err);
2185 }
2186 if (err)
2187 brcmf_err("Failed to force clock for F2: err %d\n", err);
2188
2189 /* Turn off the bus (F2), free any pending packets */
2190 brcmf_dbg(INTR, "disable SDIO interrupts\n");
2191 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
2192 NULL);
2193
2194 /* Clear any pending interrupts now that F2 is disabled */
2195 w_sdreg32(bus, local_hostintmask,
2196 offsetof(struct sdpcmd_regs, intstatus));
2197
2198 /* Turn off the backplane clock (only) */
2199 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
2200 sdio_release_host(bus->sdiodev->func[1]);
2201
2202 /* Clear the data packet queues */
2203 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2204
2205 /* Clear any held glomming stuff */
2206 if (bus->glomd)
2207 brcmu_pkt_buf_free_skb(bus->glomd);
2208 brcmf_sdbrcm_free_glom(bus);
2209
2210 /* Clear rx control and wake any waiters */
2211 spin_lock_bh(&bus->rxctl_lock);
2212 bus->rxlen = 0;
2213 spin_unlock_bh(&bus->rxctl_lock);
2214 brcmf_sdbrcm_dcmd_resp_wake(bus);
2215
2216 /* Reset F2-related state */
2217 bus->rxskip = false;
2218 bus->tx_seq = bus->rx_seq = 0;
2219 }
2220
2221 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2222 {
2223 unsigned long flags;
2224
2225 if (bus->sdiodev->oob_irq_requested) {
2226 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2227 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2228 enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2229 bus->sdiodev->irq_en = true;
2230 }
2231 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2232 }
2233 }
2234
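/*
 * Read the SDIO core intstatus register, acknowledge the bits that are set
 * and fold them into bus->intstatus for the DPC. Also latches the current
 * host-mailbox flow-control state into bus->fcstate.
 */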
2235 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2236 {
2237 u8 idx;
2238 u32 addr;
2239 unsigned long val;
2240 int n, ret;
2241
2242 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2243 addr = bus->ci->c_inf[idx].base +
2244 offsetof(struct sdpcmd_regs, intstatus);
2245
2246 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2247 bus->sdcnt.f1regdata++;
2248 if (ret != 0)
2249 val = 0;
2250
2251 val &= bus->hostintmask;
2252 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2253
2254 /* Clear interrupts */
2255 if (val) {
2256 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2257 bus->sdcnt.f1regdata++;
2258 }
2259
2260 if (ret) {
2261 atomic_set(&bus->intstatus, 0);
2262 } else if (val) {
2263 for_each_set_bit(n, &val, 32)
2264 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2265 }
2266
2267 return ret;
2268 }
2269
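/*
 * Deferred processing (DPC) for the SDIO bus, run from the bus workqueue.
 * In outline: make sure the backplane clock is available, collect the
 * pending interrupt status, service mailbox and flow-control events, read
 * any available rx frames, send queued tx frames and a pending control
 * frame, then either reschedule itself or drop the clock request when idle.
 */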
2270 static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2271 {
2272 u32 newstatus = 0;
2273 unsigned long intstatus;
2274 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2275 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2276 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2277 int err = 0, n;
2278
2279 brcmf_dbg(TRACE, "Enter\n");
2280
2281 sdio_claim_host(bus->sdiodev->func[1]);
2282
2283 /* If waiting for HTAVAIL, check status */
2284 if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2285 u8 clkctl, devctl = 0;
2286
2287 #ifdef DEBUG
2288 /* Check for inconsistent device control */
2289 devctl = brcmf_sdio_regrb(bus->sdiodev,
2290 SBSDIO_DEVICE_CTL, &err);
2291 if (err) {
2292 brcmf_err("error reading DEVCTL: %d\n", err);
2293 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2294 }
2295 #endif /* DEBUG */
2296
2297 /* Read CSR, if clock on switch to AVAIL, else ignore */
2298 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2299 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2300 if (err) {
2301 brcmf_err("error reading CSR: %d\n",
2302 err);
2303 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2304 }
2305
2306 brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2307 devctl, clkctl);
2308
2309 if (SBSDIO_HTAV(clkctl)) {
2310 devctl = brcmf_sdio_regrb(bus->sdiodev,
2311 SBSDIO_DEVICE_CTL, &err);
2312 if (err) {
2313 brcmf_err("error reading DEVCTL: %d\n",
2314 err);
2315 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2316 }
2317 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2318 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2319 devctl, &err);
2320 if (err) {
2321 brcmf_err("error writing DEVCTL: %d\n",
2322 err);
2323 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2324 }
2325 bus->clkstate = CLK_AVAIL;
2326 }
2327 }
2328
2329 /* Make sure backplane clock is on */
2330 brcmf_sdbrcm_bus_sleep(bus, false, true);
2331
2332 /* Pending interrupt indicates new device status */
2333 if (atomic_read(&bus->ipend) > 0) {
2334 atomic_set(&bus->ipend, 0);
2335 err = brcmf_sdio_intr_rstatus(bus);
2336 }
2337
2338 /* Start with leftover status bits */
2339 intstatus = atomic_xchg(&bus->intstatus, 0);
2340
2341 /* Handle flow-control change: read new state in case our ack
2342 * crossed another change interrupt. If change still set, assume
2343 * FC ON for safety and let the next pass through the loop do the debounce.
2344 */
2345 if (intstatus & I_HMB_FC_CHANGE) {
2346 intstatus &= ~I_HMB_FC_CHANGE;
2347 err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2348 offsetof(struct sdpcmd_regs, intstatus));
2349
2350 err = r_sdreg32(bus, &newstatus,
2351 offsetof(struct sdpcmd_regs, intstatus));
2352 bus->sdcnt.f1regdata += 2;
2353 atomic_set(&bus->fcstate,
2354 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2355 intstatus |= (newstatus & bus->hostintmask);
2356 }
2357
2358 /* Handle host mailbox indication */
2359 if (intstatus & I_HMB_HOST_INT) {
2360 intstatus &= ~I_HMB_HOST_INT;
2361 intstatus |= brcmf_sdbrcm_hostmail(bus);
2362 }
2363
2364 sdio_release_host(bus->sdiodev->func[1]);
2365
2366 /* Generally don't ask for these, can get CRC errors... */
2367 if (intstatus & I_WR_OOSYNC) {
2368 brcmf_err("Dongle reports WR_OOSYNC\n");
2369 intstatus &= ~I_WR_OOSYNC;
2370 }
2371
2372 if (intstatus & I_RD_OOSYNC) {
2373 brcmf_err("Dongle reports RD_OOSYNC\n");
2374 intstatus &= ~I_RD_OOSYNC;
2375 }
2376
2377 if (intstatus & I_SBINT) {
2378 brcmf_err("Dongle reports SBINT\n");
2379 intstatus &= ~I_SBINT;
2380 }
2381
2382 /* Would be active due to wake-wlan in gSPI */
2383 if (intstatus & I_CHIPACTIVE) {
2384 brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2385 intstatus &= ~I_CHIPACTIVE;
2386 }
2387
2388 /* Ignore frame indications if rxskip is set */
2389 if (bus->rxskip)
2390 intstatus &= ~I_HMB_FRAME_IND;
2391
2392 /* On frame indication, read available frames */
2393 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2394 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2395 if (!bus->rxpending)
2396 intstatus &= ~I_HMB_FRAME_IND;
2397 rxlimit -= min(framecnt, rxlimit);
2398 }
2399
2400 /* Keep still-pending events for next scheduling */
2401 if (intstatus) {
2402 for_each_set_bit(n, &intstatus, 32)
2403 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2404 }
2405
2406 brcmf_sdbrcm_clrintr(bus);
2407
2408 if (data_ok(bus) && bus->ctrl_frame_stat &&
2409 (bus->clkstate == CLK_AVAIL)) {
2410 int i;
2411
2412 sdio_claim_host(bus->sdiodev->func[1]);
2413 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2414 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2415 (u32) bus->ctrl_frame_len);
2416
2417 if (err < 0) {
2418 /* On failure, abort the command and
2419 * terminate the frame */
2420 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2421 err);
2422 bus->sdcnt.tx_sderrs++;
2423
2424 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2425
2426 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2427 SFC_WF_TERM, &err);
2428 bus->sdcnt.f1regdata++;
2429
2430 for (i = 0; i < 3; i++) {
2431 u8 hi, lo;
2432 hi = brcmf_sdio_regrb(bus->sdiodev,
2433 SBSDIO_FUNC1_WFRAMEBCHI,
2434 &err);
2435 lo = brcmf_sdio_regrb(bus->sdiodev,
2436 SBSDIO_FUNC1_WFRAMEBCLO,
2437 &err);
2438 bus->sdcnt.f1regdata += 2;
2439 if ((hi == 0) && (lo == 0))
2440 break;
2441 }
2442
2443 } else {
2444 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2445 }
2446 sdio_release_host(bus->sdiodev->func[1]);
2447 bus->ctrl_frame_stat = false;
2448 brcmf_sdbrcm_wait_event_wakeup(bus);
2449 }
2450 /* Send queued frames (limit to txminmax if rx may still be pending) */
2451 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2452 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2453 && data_ok(bus)) {
2454 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2455 txlimit;
2456 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2457 txlimit -= framecnt;
2458 }
2459
2460 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2461 brcmf_err("failed backplane access over SDIO, halting operation\n");
2462 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2463 atomic_set(&bus->intstatus, 0);
2464 } else if (atomic_read(&bus->intstatus) ||
2465 atomic_read(&bus->ipend) > 0 ||
2466 (!atomic_read(&bus->fcstate) &&
2467 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2468 data_ok(bus)) || PKT_AVAILABLE()) {
2469 atomic_inc(&bus->dpc_tskcnt);
2470 }
2471
2472 /* If we're done for now, turn off clock request. */
2473 if ((bus->clkstate != CLK_PENDING)
2474 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2475 bus->activity = false;
2476 brcmf_dbg(SDIO, "idle state\n");
2477 sdio_claim_host(bus->sdiodev->func[1]);
2478 brcmf_sdbrcm_bus_sleep(bus, true, false);
2479 sdio_release_host(bus->sdiodev->func[1]);
2480 }
2481 }
2482
2483 static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
2484 {
2485 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2486 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2487 struct brcmf_sdio *bus = sdiodev->bus;
2488
2489 return &bus->txq;
2490 }
2491
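/*
 * Queue a data packet for transmission. The caller's packet gets bus header
 * room pushed on, is enqueued by precedence derived from its priority, and
 * the DPC is scheduled to drain the queue. Once the queue reaches TXHI the
 * network stack is flow-controlled until it falls back below TXLOW.
 */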
2492 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2493 {
2494 int ret = -EBADE;
2495 uint datalen, prec;
2496 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2497 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2498 struct brcmf_sdio *bus = sdiodev->bus;
2499 ulong flags;
2500
2501 brcmf_dbg(TRACE, "Enter\n");
2502
2503 datalen = pkt->len;
2504
2505 /* Add space for the header */
2506 skb_push(pkt, bus->tx_hdrlen);
2507 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2508
2509 prec = prio2prec((pkt->priority & PRIOMASK));
2510
2511 /* Check for existing queue, current flow-control,
2512 pending event, or pending clock */
2513 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2514 bus->sdcnt.fcqueued++;
2515
2516 /* Priority based enq */
2517 spin_lock_irqsave(&bus->txqlock, flags);
2518 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2519 skb_pull(pkt, bus->tx_hdrlen);
2520 brcmf_err("out of bus->txq !!!\n");
2521 ret = -ENOSR;
2522 } else {
2523 ret = 0;
2524 }
2525
2526 if (pktq_len(&bus->txq) >= TXHI) {
2527 bus->txoff = true;
2528 brcmf_txflowblock(bus->sdiodev->dev, true);
2529 }
2530 spin_unlock_irqrestore(&bus->txqlock, flags);
2531
2532 #ifdef DEBUG
2533 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2534 qcount[prec] = pktq_plen(&bus->txq, prec);
2535 #endif
2536
2537 if (atomic_read(&bus->dpc_tskcnt) == 0) {
2538 atomic_inc(&bus->dpc_tskcnt);
2539 queue_work(bus->brcmf_wq, &bus->datawork);
2540 }
2541
2542 return ret;
2543 }
2544
2545 #ifdef DEBUG
2546 #define CONSOLE_LINE_MAX 192
2547
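/*
 * Poll the firmware console: read the dongle's rte_console ring buffer over
 * the backplane whenever the write index has advanced and emit complete
 * lines through pr_debug().
 */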
2548 static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2549 {
2550 struct brcmf_console *c = &bus->console;
2551 u8 line[CONSOLE_LINE_MAX], ch;
2552 u32 n, idx, addr;
2553 int rv;
2554
2555 /* Don't do anything until FWREADY updates console address */
2556 if (bus->console_addr == 0)
2557 return 0;
2558
2559 /* Read console log struct */
2560 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2561 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2562 sizeof(c->log_le));
2563 if (rv < 0)
2564 return rv;
2565
2566 /* Allocate console buffer (one time only) */
2567 if (c->buf == NULL) {
2568 c->bufsize = le32_to_cpu(c->log_le.buf_size);
2569 c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2570 if (c->buf == NULL)
2571 return -ENOMEM;
2572 }
2573
2574 idx = le32_to_cpu(c->log_le.idx);
2575
2576 /* Protect against corrupt value */
2577 if (idx > c->bufsize)
2578 return -EBADE;
2579
2580 /* Skip reading the console buffer if the index pointer
2581 * has not moved */
2582 if (idx == c->last)
2583 return 0;
2584
2585 /* Read the console buffer */
2586 addr = le32_to_cpu(c->log_le.buf);
2587 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2588 if (rv < 0)
2589 return rv;
2590
2591 while (c->last != idx) {
2592 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2593 if (c->last == idx) {
2594 /* This would output a partial line.
2595 * Instead, back up
2596 * the buffer pointer and output this
2597 * line next time around.
2598 */
2599 if (c->last >= n)
2600 c->last -= n;
2601 else
2602 c->last = c->bufsize - n;
2603 goto break2;
2604 }
2605 ch = c->buf[c->last];
2606 c->last = (c->last + 1) % c->bufsize;
2607 if (ch == '\n')
2608 break;
2609 line[n] = ch;
2610 }
2611
2612 if (n > 0) {
2613 if (line[n - 1] == '\r')
2614 n--;
2615 line[n] = 0;
2616 pr_debug("CONSOLE: %s\n", line);
2617 }
2618 }
2619 break2:
2620
2621 return 0;
2622 }
2623 #endif /* DEBUG */
2624
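/*
 * Send a single prepared control frame on F2. On an SDIO error the transfer
 * is aborted, the frame is terminated through the F1 frame-control register
 * and the write-frame byte counters are read back (up to three attempts)
 * until they reach zero, mirroring the data-path error handling above.
 */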
2625 static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2626 {
2627 int i;
2628 int ret;
2629
2630 bus->ctrl_frame_stat = false;
2631 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2632 SDIO_FUNC_2, F2SYNC, frame, len);
2633
2634 if (ret < 0) {
2635 /* On failure, abort the command and terminate the frame */
2636 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2637 ret);
2638 bus->sdcnt.tx_sderrs++;
2639
2640 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2641
2642 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2643 SFC_WF_TERM, NULL);
2644 bus->sdcnt.f1regdata++;
2645
2646 for (i = 0; i < 3; i++) {
2647 u8 hi, lo;
2648 hi = brcmf_sdio_regrb(bus->sdiodev,
2649 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2650 lo = brcmf_sdio_regrb(bus->sdiodev,
2651 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2652 bus->sdcnt.f1regdata += 2;
2653 if (hi == 0 && lo == 0)
2654 break;
2655 }
2656 return ret;
2657 }
2658
2659 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2660
2661 return ret;
2662 }
2663
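/*
 * Transmit a control (dcmd) message. The bus header is built in the headroom
 * the protocol layer reserved in front of @msg, the length is rounded up to
 * the F2 block size, and the frame is either sent directly or, when tx
 * credit is exhausted, handed to the DPC via ctrl_frame_buf/ctrl_frame_len
 * and waited on for up to two seconds.
 *
 * Illustrative padding example (hypothetical values): with a 64-byte F2
 * block size and a 130-byte frame, the 62-byte pad is within bus->roundup,
 * so the frame goes out as 192 bytes.
 */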
2664 static int
2665 brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2666 {
2667 u8 *frame;
2668 u16 len;
2669 uint retries = 0;
2670 u8 doff = 0;
2671 int ret = -1;
2672 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2673 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2674 struct brcmf_sdio *bus = sdiodev->bus;
2675 struct brcmf_sdio_hdrinfo hd_info = {0};
2676
2677 brcmf_dbg(TRACE, "Enter\n");
2678
2679 /* Back up the pointer to make room for the bus header */
2680 frame = msg - bus->tx_hdrlen;
2681 len = (msglen += bus->tx_hdrlen);
2682
2683 /* Add alignment padding (optional for ctl frames) */
2684 doff = ((unsigned long)frame % BRCMF_SDALIGN);
2685 if (doff) {
2686 frame -= doff;
2687 len += doff;
2688 msglen += doff;
2689 memset(frame, 0, doff + bus->tx_hdrlen);
2690 }
2691 /* precondition: doff < BRCMF_SDALIGN */
2692 doff += bus->tx_hdrlen;
2693
2694 /* Round send length to next SDIO block */
2695 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2696 u16 pad = bus->blocksize - (len % bus->blocksize);
2697 if ((pad <= bus->roundup) && (pad < bus->blocksize))
2698 len += pad;
2699 } else if (len % BRCMF_SDALIGN) {
2700 len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
2701 }
2702
2703 /* Satisfy length-alignment requirements */
2704 if (len & (ALIGNMENT - 1))
2705 len = roundup(len, ALIGNMENT);
2706
2707 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2708
2709 /* Make sure backplane clock is on */
2710 sdio_claim_host(bus->sdiodev->func[1]);
2711 brcmf_sdbrcm_bus_sleep(bus, false, false);
2712 sdio_release_host(bus->sdiodev->func[1]);
2713
2714 hd_info.len = (u16)msglen;
2715 hd_info.channel = SDPCM_CONTROL_CHANNEL;
2716 hd_info.dat_offset = doff;
2717 brcmf_sdio_hdpack(bus, frame, &hd_info);
2718
2719 if (!data_ok(bus)) {
2720 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2721 bus->tx_max, bus->tx_seq);
2722 bus->ctrl_frame_stat = true;
2723 /* Send from dpc */
2724 bus->ctrl_frame_buf = frame;
2725 bus->ctrl_frame_len = len;
2726
2727 wait_event_interruptible_timeout(bus->ctrl_wait,
2728 !bus->ctrl_frame_stat,
2729 msecs_to_jiffies(2000));
2730
2731 if (!bus->ctrl_frame_stat) {
2732 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2733 ret = 0;
2734 } else {
2735 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2736 ret = -1;
2737 }
2738 }
2739
2740 if (ret == -1) {
2741 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2742 frame, len, "Tx Frame:\n");
2743 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2744 BRCMF_HDRS_ON(),
2745 frame, min_t(u16, len, 16), "TxHdr:\n");
2746
2747 do {
2748 sdio_claim_host(bus->sdiodev->func[1]);
2749 ret = brcmf_tx_frame(bus, frame, len);
2750 sdio_release_host(bus->sdiodev->func[1]);
2751 } while (ret < 0 && retries++ < TXRETRIES);
2752 }
2753
2754 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2755 atomic_read(&bus->dpc_tskcnt) == 0) {
2756 bus->activity = false;
2757 sdio_claim_host(bus->sdiodev->func[1]);
2758 brcmf_dbg(INFO, "idle\n");
2759 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2760 sdio_release_host(bus->sdiodev->func[1]);
2761 }
2762
2763 if (ret)
2764 bus->sdcnt.tx_ctlerrs++;
2765 else
2766 bus->sdcnt.tx_ctlpkts++;
2767
2768 return ret ? -EIO : 0;
2769 }
2770
2771 #ifdef DEBUG
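/*
 * The last word of SOCRAM normally holds the NVRAM length token, encoded as
 * (~len << 16) | len. Once the firmware is up it overwrites that word with
 * the address of its sdpcm_shared structure, which cannot match the token
 * pattern. For example (hypothetical values) a leftover token 0xefff1000 is
 * rejected here, while a real address such as 0x0007fdc4 is accepted.
 */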
2772 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2773 {
2774 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2775 }
2776
2777 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2778 struct sdpcm_shared *sh)
2779 {
2780 u32 addr;
2781 int rv;
2782 u32 shaddr = 0;
2783 struct sdpcm_shared_le sh_le;
2784 __le32 addr_le;
2785
2786 shaddr = bus->ci->rambase + bus->ramsize - 4;
2787
2788 /*
2789 * Read last word in socram to determine
2790 * address of sdpcm_shared structure
2791 */
2792 sdio_claim_host(bus->sdiodev->func[1]);
2793 brcmf_sdbrcm_bus_sleep(bus, false, false);
2794 rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2795 sdio_release_host(bus->sdiodev->func[1]);
2796 if (rv < 0)
2797 return rv;
2798
2799 addr = le32_to_cpu(addr_le);
2800
2801 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2802
2803 /*
2804 * Check if addr is valid.
2805 * NVRAM length at the end of memory should have been overwritten.
2806 */
2807 if (!brcmf_sdio_valid_shared_address(addr)) {
2808 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2809 addr);
2810 return -EINVAL;
2811 }
2812
2813 /* Read hndrte_shared structure */
2814 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2815 sizeof(struct sdpcm_shared_le));
2816 if (rv < 0)
2817 return rv;
2818
2819 /* Endianness */
2820 sh->flags = le32_to_cpu(sh_le.flags);
2821 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2822 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2823 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2824 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2825 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2826 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2827
2828 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2829 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2830 SDPCM_SHARED_VERSION,
2831 sh->flags & SDPCM_SHARED_VERSION_MASK);
2832 return -EPROTO;
2833 }
2834
2835 return 0;
2836 }
2837
2838 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2839 struct sdpcm_shared *sh, char __user *data,
2840 size_t count)
2841 {
2842 u32 addr, console_ptr, console_size, console_index;
2843 char *conbuf = NULL;
2844 __le32 sh_val;
2845 int rv;
2846 loff_t pos = 0;
2847 int nbytes = 0;
2848
2849 /* obtain console information from device memory */
2850 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2851 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2852 (u8 *)&sh_val, sizeof(u32));
2853 if (rv < 0)
2854 return rv;
2855 console_ptr = le32_to_cpu(sh_val);
2856
2857 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2858 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2859 (u8 *)&sh_val, sizeof(u32));
2860 if (rv < 0)
2861 return rv;
2862 console_size = le32_to_cpu(sh_val);
2863
2864 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2865 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2866 (u8 *)&sh_val, sizeof(u32));
2867 if (rv < 0)
2868 return rv;
2869 console_index = le32_to_cpu(sh_val);
2870
2871 /* allocate buffer for console data */
2872 if (console_size <= CONSOLE_BUFFER_MAX)
2873 conbuf = vzalloc(console_size+1);
2874
2875 if (!conbuf)
2876 return -ENOMEM;
2877
2878 /* obtain the console data from device */
2879 conbuf[console_size] = '\0';
2880 rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
2881 console_size);
2882 if (rv < 0)
2883 goto done;
2884
2885 rv = simple_read_from_buffer(data, count, &pos,
2886 conbuf + console_index,
2887 console_size - console_index);
2888 if (rv < 0)
2889 goto done;
2890
2891 nbytes = rv;
2892 if (console_index > 0) {
2893 pos = 0;
2894 rv = simple_read_from_buffer(data+nbytes, count, &pos,
2895 conbuf, console_index - 1);
2896 if (rv < 0)
2897 goto done;
2898 rv += nbytes;
2899 }
2900 done:
2901 vfree(conbuf);
2902 return rv;
2903 }
2904
2905 static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2906 char __user *data, size_t count)
2907 {
2908 int error, res;
2909 char buf[350];
2910 struct brcmf_trap_info tr;
2911 loff_t pos = 0;
2912
2913 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
2914 brcmf_dbg(INFO, "no trap in firmware\n");
2915 return 0;
2916 }
2917
2918 error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
2919 sizeof(struct brcmf_trap_info));
2920 if (error < 0)
2921 return error;
2922
2923 res = scnprintf(buf, sizeof(buf),
2924 "dongle trap info: type 0x%x @ epc 0x%08x\n"
2925 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
2926 " lr 0x%08x pc 0x%08x offset 0x%x\n"
2927 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
2928 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
2929 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
2930 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
2931 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
2932 le32_to_cpu(tr.pc), sh->trap_addr,
2933 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
2934 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
2935 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
2936 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
2937
2938 return simple_read_from_buffer(data, count, &pos, buf, res);
2939 }
2940
2941 static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2942 struct sdpcm_shared *sh, char __user *data,
2943 size_t count)
2944 {
2945 int error = 0;
2946 char buf[200];
2947 char file[80] = "?";
2948 char expr[80] = "<???>";
2949 int res;
2950 loff_t pos = 0;
2951
2952 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
2953 brcmf_dbg(INFO, "firmware not built with -assert\n");
2954 return 0;
2955 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
2956 brcmf_dbg(INFO, "no assert in dongle\n");
2957 return 0;
2958 }
2959
2960 sdio_claim_host(bus->sdiodev->func[1]);
2961 if (sh->assert_file_addr != 0)
2962 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2963 sh->assert_file_addr, (u8 *)file, 80);
2964 if (error >= 0 && sh->assert_exp_addr != 0)
2965 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2966 sh->assert_exp_addr, (u8 *)expr, 80);
2967 /* release the host before any error return so the claim is not leaked */
2968 sdio_release_host(bus->sdiodev->func[1]);
2969 if (error < 0)
2970 return error;
2974
2975 res = scnprintf(buf, sizeof(buf),
2976 "dongle assert: %s:%d: assert(%s)\n",
2977 file, sh->assert_line, expr);
2978 return simple_read_from_buffer(data, count, &pos, buf, res);
2979 }
2980
2981 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2982 {
2983 int error;
2984 struct sdpcm_shared sh;
2985
2986 error = brcmf_sdio_readshared(bus, &sh);
2987
2988 if (error < 0)
2989 return error;
2990
2991 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
2992 brcmf_dbg(INFO, "firmware not built with -assert\n");
2993 else if (sh.flags & SDPCM_SHARED_ASSERT)
2994 brcmf_err("assertion in dongle\n");
2995
2996 if (sh.flags & SDPCM_SHARED_TRAP)
2997 brcmf_err("firmware trap in dongle\n");
2998
2999 return 0;
3000 }
3001
3002 static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
3003 size_t count, loff_t *ppos)
3004 {
3005 int error = 0;
3006 struct sdpcm_shared sh;
3007 int nbytes = 0;
3008 loff_t pos = *ppos;
3009
3010 if (pos != 0)
3011 return 0;
3012
3013 error = brcmf_sdio_readshared(bus, &sh);
3014 if (error < 0)
3015 goto done;
3016
3017 error = brcmf_sdio_assert_info(bus, &sh, data, count);
3018 if (error < 0)
3019 goto done;
3020 nbytes = error;
3021
3022 error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
3023 if (error < 0)
3024 goto done;
3025 nbytes += error;
3026
3027 error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
3028 if (error < 0)
3029 goto done;
3030 nbytes += error;
3031
3032 error = nbytes;
3033 *ppos += nbytes;
3034 done:
3035 return error;
3036 }
3037
3038 static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
3039 size_t count, loff_t *ppos)
3040 {
3041 struct brcmf_sdio *bus = f->private_data;
3042 int res;
3043
3044 /* brcmf_sdbrcm_died_dump() already advances *ppos */
3045 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
3046 return (ssize_t)res;
3048 }
3049
3050 static const struct file_operations brcmf_sdio_forensic_ops = {
3051 .owner = THIS_MODULE,
3052 .open = simple_open,
3053 .read = brcmf_sdio_forensic_read
3054 };
3055
3056 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3057 {
3058 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3059 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3060
3061 if (IS_ERR_OR_NULL(dentry))
3062 return;
3063
3064 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3065 &brcmf_sdio_forensic_ops);
3066 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3067 }
3068 #else
3069 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3070 {
3071 return 0;
3072 }
3073
3074 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3075 {
3076 }
3077 #endif /* DEBUG */
3078
3079 static int
3080 brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3081 {
3082 int timeleft;
3083 uint rxlen = 0;
3084 bool pending;
3085 u8 *buf;
3086 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3087 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3088 struct brcmf_sdio *bus = sdiodev->bus;
3089
3090 brcmf_dbg(TRACE, "Enter\n");
3091
3092 /* Wait until control frame is available */
3093 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3094
3095 spin_lock_bh(&bus->rxctl_lock);
3096 rxlen = bus->rxlen;
3097 memcpy(msg, bus->rxctl, min(msglen, rxlen));
3098 bus->rxctl = NULL;
3099 buf = bus->rxctl_orig;
3100 bus->rxctl_orig = NULL;
3101 bus->rxlen = 0;
3102 spin_unlock_bh(&bus->rxctl_lock);
3103 vfree(buf);
3104
3105 if (rxlen) {
3106 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3107 rxlen, msglen);
3108 } else if (timeleft == 0) {
3109 brcmf_err("resumed on timeout\n");
3110 brcmf_sdbrcm_checkdied(bus);
3111 } else if (pending) {
3112 brcmf_dbg(CTL, "cancelled\n");
3113 return -ERESTARTSYS;
3114 } else {
3115 brcmf_dbg(CTL, "resumed for unknown reason?\n");
3116 brcmf_sdbrcm_checkdied(bus);
3117 }
3118
3119 if (rxlen)
3120 bus->sdcnt.rx_ctlpkts++;
3121 else
3122 bus->sdcnt.rx_ctlerrs++;
3123
3124 return rxlen ? (int)rxlen : -ETIMEDOUT;
3125 }
3126
3127 static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3128 {
3129 struct chip_info *ci = bus->ci;
3130
3131 /* To enter download state, disable ARM and reset SOCRAM.
3132 * To exit download state, simply reset ARM (default is RAM boot).
3133 */
3134 if (enter) {
3135 bus->alp_only = true;
3136
3137 brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
3138 } else {
3139 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
3140 bus->varsz))
3141 return false;
3142
3143 /* Allow HT Clock now that the ARM is running. */
3144 bus->alp_only = false;
3145
3146 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
3147 }
3148
3149 return true;
3150 }
3151
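/*
 * Download the firmware image into dongle RAM in MEMBLOCK-sized chunks,
 * starting at the chip's rambase. For chips whose ARM CR4 core boots from
 * RAM, the first words of the image also supply the reset vector that is
 * latched into bus->ci->rst_vec before the download starts.
 */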
3152 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3153 {
3154 const struct firmware *fw;
3155 int err;
3156 int offset;
3157 int address;
3158 int len;
3159
3160 fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
3161 if (fw == NULL)
3162 return -ENOENT;
3163
3164 if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
3165 BRCMF_MAX_CORENUM)
3166 memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
3167
3168 err = 0;
3169 offset = 0;
3170 address = bus->ci->rambase;
3171 while (offset < fw->size) {
3172 len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
3173 fw->size - offset;
3174 err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
3175 (u8 *)&fw->data[offset], len);
3176 if (err) {
3177 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3178 err, len, address);
3179 goto failure;
3180 }
3181 offset += len;
3182 address += len;
3183 }
3184
3185 failure:
3186 release_firmware(fw);
3187
3188 return err;
3189 }
3190
3191 /*
3192 * brcmf_process_nvram_vars: takes a buffer of "<var>=<value>\n" lines read
3193 * from a file and ending in a NUL.
3194 * Removes carriage returns, empty lines, comment lines, and converts
3195 * newlines to NULs.
3196 * Shortens the buffer as needed and pads with NULs. The end of the buffer
3197 * is marked by two NULs.
3198 */
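/*
 * For example (hypothetical contents), an input of
 *	"# comment\r\nboardtype=0x062b\n\nmacaddr=00:90:4c:c5:12:38\n"
 * is reduced to
 *	"boardtype=0x062b\0macaddr=00:90:4c:c5:12:38\0\0"
 * before being rounded up and copied into bus->vars.
 */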
3199
3200 static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
3201 const struct firmware *nv)
3202 {
3203 char *varbuf;
3204 char *dp;
3205 bool findNewline;
3206 int column;
3207 int ret = 0;
3208 uint buf_len, n, len;
3209
3210 len = nv->size;
3211 varbuf = vmalloc(len);
3212 if (!varbuf)
3213 return -ENOMEM;
3214
3215 memcpy(varbuf, nv->data, len);
3216 dp = varbuf;
3217
3218 findNewline = false;
3219 column = 0;
3220
3221 for (n = 0; n < len; n++) {
3222 if (varbuf[n] == 0)
3223 break;
3224 if (varbuf[n] == '\r')
3225 continue;
3226 if (findNewline && varbuf[n] != '\n')
3227 continue;
3228 findNewline = false;
3229 if (varbuf[n] == '#') {
3230 findNewline = true;
3231 continue;
3232 }
3233 if (varbuf[n] == '\n') {
3234 if (column == 0)
3235 continue;
3236 *dp++ = 0;
3237 column = 0;
3238 continue;
3239 }
3240 *dp++ = varbuf[n];
3241 column++;
3242 }
3243 buf_len = dp - varbuf;
3244 while (dp < varbuf + n)
3245 *dp++ = 0;
3246
3247 kfree(bus->vars);
3248 /* roundup needed for download to device */
3249 bus->varsz = roundup(buf_len + 1, 4);
3250 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3251 if (bus->vars == NULL) {
3252 bus->varsz = 0;
3253 ret = -ENOMEM;
3254 goto err;
3255 }
3256
3257 /* copy the processed variables and add null termination */
3258 memcpy(bus->vars, varbuf, buf_len);
3259 bus->vars[buf_len] = 0;
3260 err:
3261 vfree(varbuf);
3262 return ret;
3263 }
3264
3265 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3266 {
3267 const struct firmware *nv;
3268 int ret;
3269
3270 nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
3271 if (nv == NULL)
3272 return -ENOENT;
3273
3274 ret = brcmf_process_nvram_vars(bus, nv);
3275
3276 release_firmware(nv);
3277
3278 return ret;
3279 }
3280
3281 static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3282 {
3283 int bcmerror = -1;
3284
3285 /* Keep arm in reset */
3286 if (!brcmf_sdbrcm_download_state(bus, true)) {
3287 brcmf_err("error placing ARM core in reset\n");
3288 goto err;
3289 }
3290
3291 if (brcmf_sdbrcm_download_code_file(bus)) {
3292 brcmf_err("dongle image file download failed\n");
3293 goto err;
3294 }
3295
3296 if (brcmf_sdbrcm_download_nvram(bus)) {
3297 brcmf_err("dongle nvram file download failed\n");
3298 goto err;
3299 }
3300
3301 /* Take arm out of reset */
3302 if (!brcmf_sdbrcm_download_state(bus, false)) {
3303 brcmf_err("error getting out of ARM core reset\n");
3304 goto err;
3305 }
3306
3307 bcmerror = 0;
3308
3309 err:
3310 return bcmerror;
3311 }
3312
3313 static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
3314 {
3315 u32 addr, reg;
3316
3317 brcmf_dbg(TRACE, "Enter\n");
3318
3319 /* chips with a PMU rev lower than 17 don't support save/restore */
3320 if (bus->ci->pmurev < 17)
3321 return false;
3322
3323 /* read PMU chipcontrol register 3 */
3324 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3325 brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
3326 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3327 reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
3328
3329 return (bool)reg;
3330 }
3331
3332 static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
3333 {
3334 int err = 0;
3335 u8 val;
3336
3337 brcmf_dbg(TRACE, "Enter\n");
3338
3339 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3340 &err);
3341 if (err) {
3342 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3343 return;
3344 }
3345
3346 val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3347 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3348 val, &err);
3349 if (err) {
3350 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3351 return;
3352 }
3353
3354 /* Add CMD14 Support */
3355 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3356 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3357 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3358 &err);
3359 if (err) {
3360 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3361 return;
3362 }
3363
3364 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3365 SBSDIO_FORCE_HT, &err);
3366 if (err) {
3367 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3368 return;
3369 }
3370
3371 /* set flag */
3372 bus->sr_enabled = true;
3373 brcmf_dbg(INFO, "SR enabled\n");
3374 }
3375
3376 /* enable KSO bit */
3377 static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
3378 {
3379 u8 val;
3380 int err = 0;
3381
3382 brcmf_dbg(TRACE, "Enter\n");
3383
3384 /* KSO bit added in SDIO core rev 12 */
3385 if (bus->ci->c_inf[1].rev < 12)
3386 return 0;
3387
3388 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3389 &err);
3390 if (err) {
3391 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3392 return err;
3393 }
3394
3395 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3396 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3397 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3398 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3399 val, &err);
3400 if (err) {
3401 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3402 return err;
3403 }
3404 }
3405
3406 return 0;
3407 }
3408
3409
3410 static bool
3411 brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3412 {
3413 bool ret;
3414
3415 sdio_claim_host(bus->sdiodev->func[1]);
3416
3417 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3418
3419 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3420
3421 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3422
3423 sdio_release_host(bus->sdiodev->func[1]);
3424
3425 return ret;
3426 }
3427
3428 static int brcmf_sdbrcm_bus_init(struct device *dev)
3429 {
3430 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3431 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3432 struct brcmf_sdio *bus = sdiodev->bus;
3433 unsigned long timeout;
3434 u8 ready, enable;
3435 int err, ret = 0;
3436 u8 saveclk;
3437
3438 brcmf_dbg(TRACE, "Enter\n");
3439
3440 /* try to download image and nvram to the dongle */
3441 if (bus_if->state == BRCMF_BUS_DOWN) {
3442 if (!(brcmf_sdbrcm_download_firmware(bus)))
3443 return -1;
3444 }
3445
3446 if (!bus->sdiodev->bus_if->drvr)
3447 return 0;
3448
3449 /* Start the watchdog timer */
3450 bus->sdcnt.tickcnt = 0;
3451 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3452
3453 sdio_claim_host(bus->sdiodev->func[1]);
3454
3455 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3456 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3457 if (bus->clkstate != CLK_AVAIL)
3458 goto exit;
3459
3460 /* Force clocks on backplane to be sure F2 interrupt propagates */
3461 saveclk = brcmf_sdio_regrb(bus->sdiodev,
3462 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3463 if (!err) {
3464 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3465 (saveclk | SBSDIO_FORCE_HT), &err);
3466 }
3467 if (err) {
3468 brcmf_err("Failed to force clock for F2: err %d\n", err);
3469 goto exit;
3470 }
3471
3472 /* Enable function 2 (frame transfers) */
3473 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3474 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3475 enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
3476
3477 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3478
3479 timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
3480 ready = 0;
3481 while (enable != ready) {
3482 ready = brcmf_sdio_regrb(bus->sdiodev,
3483 SDIO_CCCR_IORx, NULL);
3484 if (time_after(jiffies, timeout))
3485 break;
3486 else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
3487 /* prevent busy waiting if it takes too long */
3488 msleep_interruptible(20);
3489 }
3490
3491 brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
3492
3493 /* If F2 successfully enabled, set core and enable interrupts */
3494 if (ready == enable) {
3495 /* Set up the interrupt mask and enable interrupts */
3496 bus->hostintmask = HOSTINTMASK;
3497 w_sdreg32(bus, bus->hostintmask,
3498 offsetof(struct sdpcmd_regs, hostintmask));
3499
3500 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3501 } else {
3502 /* Disable F2 again */
3503 enable = SDIO_FUNC_ENABLE_1;
3504 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
3505 ret = -ENODEV;
3506 }
3507
3508 if (brcmf_sdbrcm_sr_capable(bus)) {
3509 brcmf_sdbrcm_sr_init(bus);
3510 } else {
3511 /* Restore previous clock setting */
3512 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3513 saveclk, &err);
3514 }
3515
3516 if (ret == 0) {
3517 ret = brcmf_sdio_intr_register(bus->sdiodev);
3518 if (ret != 0)
3519 brcmf_err("intr register failed:%d\n", ret);
3520 }
3521
3522 /* If we didn't come up, turn off backplane clock */
3523 if (bus_if->state != BRCMF_BUS_DATA)
3524 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3525
3526 exit:
3527 sdio_release_host(bus->sdiodev->func[1]);
3528
3529 return ret;
3530 }
3531
3532 void brcmf_sdbrcm_isr(void *arg)
3533 {
3534 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3535
3536 brcmf_dbg(TRACE, "Enter\n");
3537
3538 if (!bus) {
3539 brcmf_err("bus is null pointer, exiting\n");
3540 return;
3541 }
3542
3543 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3544 brcmf_err("bus is down. we have nothing to do\n");
3545 return;
3546 }
3547 /* Count the interrupt call */
3548 bus->sdcnt.intrcount++;
3549 if (in_interrupt())
3550 atomic_set(&bus->ipend, 1);
3551 else
3552 if (brcmf_sdio_intr_rstatus(bus)) {
3553 brcmf_err("failed backplane access\n");
3554 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3555 }
3556
3557 /* Disable additional interrupts (is this needed now?) */
3558 if (!bus->intr)
3559 brcmf_err("isr w/o interrupt configured!\n");
3560
3561 atomic_inc(&bus->dpc_tskcnt);
3562 queue_work(bus->brcmf_wq, &bus->datawork);
3563 }
3564
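/*
 * Periodic housekeeping driven by the watchdog thread: poll the device for
 * missed interrupts when polling is enabled, read the firmware console at
 * the configured interval (DEBUG builds), and drop the bus clock after the
 * idle timeout expires.
 */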
3565 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3566 {
3567 #ifdef DEBUG
3568 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3569 #endif /* DEBUG */
3570
3571 brcmf_dbg(TIMER, "Enter\n");
3572
3573 /* Poll period: check device if appropriate. */
3574 if (!bus->sr_enabled &&
3575 bus->poll && (++bus->polltick >= bus->pollrate)) {
3576 u32 intstatus = 0;
3577
3578 /* Reset poll tick */
3579 bus->polltick = 0;
3580
3581 /* Check device if no interrupts */
3582 if (!bus->intr ||
3583 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3584
3585 if (atomic_read(&bus->dpc_tskcnt) == 0) {
3586 u8 devpend;
3587
3588 sdio_claim_host(bus->sdiodev->func[1]);
3589 devpend = brcmf_sdio_regrb(bus->sdiodev,
3590 SDIO_CCCR_INTx,
3591 NULL);
3592 sdio_release_host(bus->sdiodev->func[1]);
3593 intstatus =
3594 devpend & (INTR_STATUS_FUNC1 |
3595 INTR_STATUS_FUNC2);
3596 }
3597
3598 /* If there is something, make like the ISR and
3599 schedule the DPC */
3600 if (intstatus) {
3601 bus->sdcnt.pollcnt++;
3602 atomic_set(&bus->ipend, 1);
3603
3604 atomic_inc(&bus->dpc_tskcnt);
3605 queue_work(bus->brcmf_wq, &bus->datawork);
3606 }
3607 }
3608
3609 /* Update interrupt tracking */
3610 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3611 }
3612 #ifdef DEBUG
3613 /* Poll for console output periodically */
3614 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3615 bus->console_interval != 0) {
3616 bus->console.count += BRCMF_WD_POLL_MS;
3617 if (bus->console.count >= bus->console_interval) {
3618 bus->console.count -= bus->console_interval;
3619 sdio_claim_host(bus->sdiodev->func[1]);
3620 /* Make sure backplane clock is on */
3621 brcmf_sdbrcm_bus_sleep(bus, false, false);
3622 if (brcmf_sdbrcm_readconsole(bus) < 0)
3623 /* stop on error */
3624 bus->console_interval = 0;
3625 sdio_release_host(bus->sdiodev->func[1]);
3626 }
3627 }
3628 #endif /* DEBUG */
3629
3630 /* On idle timeout clear activity flag and/or turn off clock */
3631 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3632 if (++bus->idlecount >= bus->idletime) {
3633 bus->idlecount = 0;
3634 if (bus->activity) {
3635 bus->activity = false;
3636 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3637 } else {
3638 brcmf_dbg(SDIO, "idle\n");
3639 sdio_claim_host(bus->sdiodev->func[1]);
3640 brcmf_sdbrcm_bus_sleep(bus, true, false);
3641 sdio_release_host(bus->sdiodev->func[1]);
3642 }
3643 }
3644 }
3645
3646 return (atomic_read(&bus->ipend) > 0);
3647 }
3648
3649 static void brcmf_sdio_dataworker(struct work_struct *work)
3650 {
3651 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3652 datawork);
3653
3654 while (atomic_read(&bus->dpc_tskcnt)) {
3655 brcmf_sdbrcm_dpc(bus);
3656 atomic_dec(&bus->dpc_tskcnt);
3657 }
3658 }
3659
3660 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3661 {
3662 brcmf_dbg(TRACE, "Enter\n");
3663
3664 kfree(bus->rxbuf);
3665 bus->rxctl = bus->rxbuf = NULL;
3666 bus->rxlen = 0;
3667 }
3668
3669 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3670 {
3671 brcmf_dbg(TRACE, "Enter\n");
3672
3673 if (bus->sdiodev->bus_if->maxctl) {
3674 bus->rxblen =
3675 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
3676 ALIGNMENT) + BRCMF_SDALIGN;
3677 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3678 if (!(bus->rxbuf))
3679 return false;
3680 }
3681
3682 return true;
3683 }
3684
3685 static bool
3686 brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3687 {
3688 u8 clkctl = 0;
3689 int err = 0;
3690 int reg_addr;
3691 u32 reg_val;
3692 u32 drivestrength;
3693
3694 bus->alp_only = true;
3695
3696 sdio_claim_host(bus->sdiodev->func[1]);
3697
3698 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3699 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3700
3701 /*
3702 * Force PLL off until brcmf_sdio_chip_attach()
3703 * programs PLL control regs
3704 */
3705
3706 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3707 BRCMF_INIT_CLKCTL1, &err);
3708 if (!err)
3709 clkctl = brcmf_sdio_regrb(bus->sdiodev,
3710 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3711
3712 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3713 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3714 err, BRCMF_INIT_CLKCTL1, clkctl);
3715 goto fail;
3716 }
3717
3718 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
3719 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3720 goto fail;
3721 }
3722
3723 if (brcmf_sdbrcm_kso_init(bus)) {
3724 brcmf_err("error enabling KSO\n");
3725 goto fail;
3726 }
3727
3728 if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3729 drivestrength = bus->sdiodev->pdata->drive_strength;
3730 else
3731 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3732 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3733
3734 /* Get info on the SOCRAM cores... */
3735 bus->ramsize = bus->ci->ramsize;
3736 if (!(bus->ramsize)) {
3737 brcmf_err("failed to find SOCRAM memory!\n");
3738 goto fail;
3739 }
3740
3741 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3742 reg_val = brcmf_sdio_regrb(bus->sdiodev,
3743 SDIO_CCCR_BRCM_CARDCTRL, &err);
3744 if (err)
3745 goto fail;
3746
3747 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3748
3749 brcmf_sdio_regwb(bus->sdiodev,
3750 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3751 if (err)
3752 goto fail;
3753
3754 /* set PMUControl so a backplane reset does PMU state reload */
3755 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
3756 pmucontrol);
3757 reg_val = brcmf_sdio_regrl(bus->sdiodev,
3758 reg_addr,
3759 &err);
3760 if (err)
3761 goto fail;
3762
3763 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3764
3765 brcmf_sdio_regwl(bus->sdiodev,
3766 reg_addr,
3767 reg_val,
3768 &err);
3769 if (err)
3770 goto fail;
3771
3772
3773 sdio_release_host(bus->sdiodev->func[1]);
3774
3775 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3776
3777 /* Locate an appropriately-aligned portion of hdrbuf */
3778 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3779 BRCMF_SDALIGN);
3780
3781 /* Set the poll and/or interrupt flags */
3782 bus->intr = true;
3783 bus->poll = false;
3784 if (bus->poll)
3785 bus->pollrate = 1;
3786
3787 return true;
3788
3789 fail:
3790 sdio_release_host(bus->sdiodev->func[1]);
3791 return false;
3792 }
3793
3794 static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3795 {
3796 brcmf_dbg(TRACE, "Enter\n");
3797
3798 sdio_claim_host(bus->sdiodev->func[1]);
3799
3800 /* Disable F2 to clear any intermediate frame state on the dongle */
3801 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
3802 SDIO_FUNC_ENABLE_1, NULL);
3803
3804 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3805 bus->rxflow = false;
3806
3807 /* Done with backplane-dependent accesses, can drop clock... */
3808 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
3809
3810 sdio_release_host(bus->sdiodev->func[1]);
3811
3812 /* ...and initialize clock/power states */
3813 bus->clkstate = CLK_SDONLY;
3814 bus->idletime = BRCMF_IDLE_INTERVAL;
3815 bus->idleclock = BRCMF_IDLE_ACTIVE;
3816
3817 /* Query the F2 block size, set roundup accordingly */
3818 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
3819 bus->roundup = min(max_roundup, bus->blocksize);
3820
3821 /* SR state */
3822 bus->sleeping = false;
3823 bus->sr_enabled = false;
3824
3825 return true;
3826 }
3827
3828 static int
3829 brcmf_sdbrcm_watchdog_thread(void *data)
3830 {
3831 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3832
3833 allow_signal(SIGTERM);
3834 /* Run until signal received */
3835 while (1) {
3836 if (kthread_should_stop())
3837 break;
3838 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3839 brcmf_sdbrcm_bus_watchdog(bus);
3840 /* Count the tick for reference */
3841 bus->sdcnt.tickcnt++;
3842 } else
3843 break;
3844 }
3845 return 0;
3846 }
3847
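/* Timer callback for the watchdog.  It runs in softirq context where sleeping
 * is not allowed, so it only wakes the watchdog kthread above via complete()
 * and re-arms itself BRCMF_WD_POLL_MS milliseconds ahead (the expression
 * BRCMF_WD_POLL_MS * HZ / 1000 is essentially msecs_to_jiffies() written out
 * by hand); the kthread then runs brcmf_sdbrcm_bus_watchdog() in process
 * context.
 */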
3848 static void
3849 brcmf_sdbrcm_watchdog(unsigned long data)
3850 {
3851 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3852
3853 if (bus->watchdog_tsk) {
3854 complete(&bus->watchdog_wait);
3855 /* Reschedule the watchdog */
3856 if (bus->wd_timer_valid)
3857 mod_timer(&bus->timer,
3858 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
3859 }
3860 }
3861
3862 static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3863 {
3864 brcmf_dbg(TRACE, "Enter\n");
3865
3866 if (bus->ci) {
3867 sdio_claim_host(bus->sdiodev->func[1]);
3868 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3869 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3870 sdio_release_host(bus->sdiodev->func[1]);
3871 brcmf_sdio_chip_detach(&bus->ci);
3872 		/* kfree(NULL) is a no-op, so free unconditionally */
3873 		kfree(bus->vars);
3874 		bus->vars = NULL;
3875 }
3876
3877 brcmf_dbg(TRACE, "Disconnected\n");
3878 }
3879
3880 /* Detach and free everything */
3881 static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3882 {
3883 brcmf_dbg(TRACE, "Enter\n");
3884
3885 if (bus) {
3886 /* De-register interrupt handler */
3887 brcmf_sdio_intr_unregister(bus->sdiodev);
3888
3889 cancel_work_sync(&bus->datawork);
3890 if (bus->brcmf_wq)
3891 destroy_workqueue(bus->brcmf_wq);
3892
3893 if (bus->sdiodev->bus_if->drvr) {
3894 brcmf_detach(bus->sdiodev->dev);
3895 brcmf_sdbrcm_release_dongle(bus);
3896 }
3897
3898 brcmf_sdbrcm_release_malloc(bus);
3899
3900 kfree(bus);
3901 }
3902
3903 brcmf_dbg(TRACE, "Disconnected\n");
3904 }
3905
3906 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3907 .stop = brcmf_sdbrcm_bus_stop,
3908 .init = brcmf_sdbrcm_bus_init,
3909 .txdata = brcmf_sdbrcm_bus_txdata,
3910 .txctl = brcmf_sdbrcm_bus_txctl,
3911 .rxctl = brcmf_sdbrcm_bus_rxctl,
3912 .gettxq = brcmf_sdbrcm_bus_gettxq,
3913 };
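/* The common driver layer does not call the SDIO routines above directly; it
 * dispatches through this ops table, which brcmf_sdbrcm_probe() below attaches
 * to the bus interface (bus_if->ops = &brcmf_sdio_bus_ops).  Keeping the bus
 * specifics behind an ops table is presumably what lets the same core code
 * drive other back ends as well.
 */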
3914
3915 void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3916 {
3917 int ret;
3918 struct brcmf_sdio *bus;
3919 struct brcmf_bus_dcmd *dlst;
3920 u32 dngl_txglom;
3921 u32 txglomalign = 0;
3922 u8 idx;
3923
3924 brcmf_dbg(TRACE, "Enter\n");
3925
3926 /* We make an assumption about address window mappings:
3927 	 * regsva == SI_ENUM_BASE */
3928
3929 /* Allocate private bus interface state */
3930 bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
3931 if (!bus)
3932 goto fail;
3933
3934 bus->sdiodev = sdiodev;
3935 sdiodev->bus = bus;
3936 skb_queue_head_init(&bus->glom);
3937 bus->txbound = BRCMF_TXBOUND;
3938 bus->rxbound = BRCMF_RXBOUND;
3939 bus->txminmax = BRCMF_TXMINMAX;
3940 bus->tx_seq = SDPCM_SEQ_WRAP - 1;
3941
3942 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3943 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3944 if (bus->brcmf_wq == NULL) {
3945 brcmf_err("insufficient memory to create txworkqueue\n");
3946 goto fail;
3947 }
3948
3949 /* attempt to attach to the dongle */
3950 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
3951 brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
3952 goto fail;
3953 }
3954
3955 spin_lock_init(&bus->rxctl_lock);
3956 spin_lock_init(&bus->txqlock);
3957 init_waitqueue_head(&bus->ctrl_wait);
3958 init_waitqueue_head(&bus->dcmd_resp_wait);
3959
3960 /* Set up the watchdog timer */
3961 init_timer(&bus->timer);
3962 bus->timer.data = (unsigned long)bus;
3963 bus->timer.function = brcmf_sdbrcm_watchdog;
3964
3965 /* Initialize watchdog thread */
3966 init_completion(&bus->watchdog_wait);
3967 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
3968 bus, "brcmf_watchdog");
3969 if (IS_ERR(bus->watchdog_tsk)) {
3970 pr_warn("brcmf_watchdog thread failed to start\n");
3971 bus->watchdog_tsk = NULL;
3972 }
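	/* A failed kthread_run() is not fatal: watchdog_tsk is cleared here,
	 * and brcmf_sdbrcm_watchdog() checks it before calling complete(), so
	 * the driver simply runs without the periodic watchdog.
	 */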
3973 /* Initialize DPC thread */
3974 atomic_set(&bus->dpc_tskcnt, 0);
3975
3976 /* Assign bus interface call back */
3977 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
3978 bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
3979 bus->sdiodev->bus_if->chip = bus->ci->chip;
3980 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
3981
3982 /* default sdio bus header length for tx packet */
3983 bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
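	/* Space reserved in front of every tx packet for the SDPCM hardware
	 * header (frame length and its check value) plus the software header
	 * (sequence number, channel, data offset and flow-control fields).
	 */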
3984
3985 /* Attach to the common layer, reserve hdr space */
3986 ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
3987 if (ret != 0) {
3988 brcmf_err("brcmf_attach failed\n");
3989 goto fail;
3990 }
3991
3992 /* Allocate buffers */
3993 if (!(brcmf_sdbrcm_probe_malloc(bus))) {
3994 brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
3995 goto fail;
3996 }
3997
3998 if (!(brcmf_sdbrcm_probe_init(bus))) {
3999 brcmf_err("brcmf_sdbrcm_probe_init failed\n");
4000 goto fail;
4001 }
4002
4003 brcmf_sdio_debugfs_create(bus);
4004 brcmf_dbg(INFO, "completed!!\n");
4005
4006 /* sdio bus core specific dcmd */
4007 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
4008 dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
4009 if (dlst) {
4010 if (bus->ci->c_inf[idx].rev < 12) {
4011 /* for sdio core rev < 12, disable txgloming */
4012 dngl_txglom = 0;
4013 dlst->name = "bus:txglom";
4014 dlst->param = (char *)&dngl_txglom;
4015 dlst->param_len = sizeof(u32);
4016 } else {
4017 /* otherwise, set txglomalign */
4018 if (sdiodev->pdata)
4019 txglomalign = sdiodev->pdata->sd_sgentry_align;
4020 /* SDIO ADMA requires at least 32 bit alignment */
4021 if (txglomalign < 4)
4022 txglomalign = 4;
4023 dlst->name = "bus:txglomalign";
4024 dlst->param = (char *)&txglomalign;
4025 dlst->param_len = sizeof(u32);
4026 }
4027 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
4028 }
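	/* The entry queued on dcmd_list above is presumably pushed to the
	 * dongle as an iovar once the bus comes up: "bus:txglom" = 0 disables
	 * tx glomming (frame aggregation) on older SDIO cores, while
	 * "bus:txglomalign" tells newer firmware what scatter-gather buffer
	 * alignment the host requires.
	 */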
4029
4030 	/* If a firmware path is present, try to download it and bring up the bus */
4031 ret = brcmf_bus_start(bus->sdiodev->dev);
4032 if (ret != 0) {
4033 brcmf_err("dongle is not responding\n");
4034 goto fail;
4035 }
4036
4037 return bus;
4038
4039 fail:
4040 brcmf_sdbrcm_release(bus);
4041 return NULL;
4042 }
4043
4044 void brcmf_sdbrcm_disconnect(void *ptr)
4045 {
4046 struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
4047
4048 brcmf_dbg(TRACE, "Enter\n");
4049
4050 if (bus)
4051 brcmf_sdbrcm_release(bus);
4052
4053 brcmf_dbg(TRACE, "Disconnected\n");
4054 }
4055
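/* Arm or disarm the periodic SDIO watchdog.  A wdtick of 0 stops the timer;
 * any non-zero value (re)arms it at the fixed BRCMF_WD_POLL_MS interval (the
 * wdtick value itself is only remembered in save_ms, not used as the period).
 */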
4056 void
4057 brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4058 {
4059 /* Totally stop the timer */
4060 if (!wdtick && bus->wd_timer_valid) {
4061 del_timer_sync(&bus->timer);
4062 bus->wd_timer_valid = false;
4063 bus->save_ms = wdtick;
4064 return;
4065 }
4066
4067 /* don't start the wd until fw is loaded */
4068 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
4069 return;
4070
4071 if (wdtick) {
4072 if (bus->save_ms != BRCMF_WD_POLL_MS) {
4073 if (bus->wd_timer_valid)
4074 /* Stop timer and restart at new value */
4075 del_timer_sync(&bus->timer);
4076
4077 			/* Start the timer when the watchdog period is
4078 			 * changed dynamically or on first use
4079 			 */
4080 bus->timer.expires =
4081 jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
4082 add_timer(&bus->timer);
4083
4084 } else {
4085 			/* Re-arm the timer at the last watchdog period */
4086 mod_timer(&bus->timer,
4087 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
4088 }
4089
4090 bus->wd_timer_valid = true;
4091 bus->save_ms = wdtick;
4092 }
4093 }