brcmfmac: remove regs parameter from sdio probe functions
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/printk.h>
21 #include <linux/pci_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/mmc/sdio.h>
26 #include <linux/mmc/sdio_func.h>
27 #include <linux/mmc/card.h>
28 #include <linux/semaphore.h>
29 #include <linux/firmware.h>
30 #include <linux/module.h>
31 #include <linux/bcma/bcma.h>
32 #include <linux/debugfs.h>
33 #include <linux/vmalloc.h>
34 #include <linux/platform_data/brcmfmac-sdio.h>
35 #include <linux/moduleparam.h>
36 #include <asm/unaligned.h>
37 #include <defs.h>
38 #include <brcmu_wifi.h>
39 #include <brcmu_utils.h>
40 #include <brcm_hw_ids.h>
41 #include <soc.h>
42 #include "sdio_host.h"
43 #include "sdio_chip.h"
44
45 #define DCMD_RESP_TIMEOUT 2000 /* in milliseconds */
46
47 #ifdef DEBUG
48
49 #define BRCMF_TRAP_INFO_SIZE 80
50
51 #define CBUF_LEN (128)
52
53 /* Device console log buffer state */
54 #define CONSOLE_BUFFER_MAX 2024
55
56 struct rte_log_le {
57 __le32 buf; /* Can't be pointer on (64-bit) hosts */
58 __le32 buf_size;
59 __le32 idx;
60 char *_buf_compat; /* Redundant pointer for backward compat. */
61 };
62
63 struct rte_console {
64 /* Virtual UART
65 * When there is no UART (e.g. Quickturn),
66 * the host should write a complete
67 * input line directly into cbuf and then write
68 * the length into vcons_in.
69 * This may also be used when there is a real UART
70 * (at risk of conflicting with
71 * the real UART). vcons_out is currently unused.
72 */
73 uint vcons_in;
74 uint vcons_out;
75
76 /* Output (logging) buffer
77 * Console output is written to a ring buffer log_buf at index log_idx.
78 * The host may read the output when it sees log_idx advance.
79 * Output will be lost if the output wraps around faster than the host
80 * polls.
81 */
82 struct rte_log_le log_le;
83
84 /* Console input line buffer
85 * Characters are read one at a time into cbuf
86 * until <CR> is received, then
87 * the buffer is processed as a command line.
88 * Also used for virtual UART.
89 */
90 uint cbuf_idx;
91 char cbuf[CBUF_LEN];
92 };
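/* Illustrative sketch of how a host-side reader could drain the ring
 * described above, assuming it has already copied log_le and the log buffer
 * contents over the backplane (the driver's console-poll routine elsewhere
 * in this file performs the real reads):
 *
 *   u32 idx  = le32_to_cpu(log_le.idx);        // dongle write index
 *   u32 size = le32_to_cpu(log_le.buf_size);
 *   while (last != idx) {                      // consume only new bytes
 *       emit_char(buf[last]);
 *       last = (last + 1) % size;
 *   }
 */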
93
94 #endif /* DEBUG */
95 #include <chipcommon.h>
96
97 #include "dhd_bus.h"
98 #include "dhd_dbg.h"
99 #include "tracepoint.h"
100
101 #define TXQLEN 2048 /* bulk tx queue length */
102 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
103 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
104 #define PRIOMASK 7
105
106 #define TXRETRIES 2 /* # of retries for tx frames */
107
108 #define BRCMF_RXBOUND 50 /* Default for max rx frames in
109 one scheduling */
110
111 #define BRCMF_TXBOUND 20 /* Default for max tx frames in
112 one scheduling */
113
114 #define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
115
116 #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
117
118 #define MEMBLOCK 2048 /* Block size used for downloading
119 of dongle image */
120 #define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
121 biggest possible glom */
122
123 #define BRCMF_FIRSTREAD (1 << 6)
124
125
126 /* SBSDIO_DEVICE_CTL */
127
128 /* 1: device will assert busy signal when receiving CMD53 */
129 #define SBSDIO_DEVCTL_SETBUSY 0x01
130 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
131 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
132 /* 1: mask all interrupts to host except the chipActive (rev 8) */
133 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
134 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
135 * sdio bus power cycle to clear (rev 9) */
136 #define SBSDIO_DEVCTL_PADS_ISO 0x08
137 /* Force SD->SB reset mapping (rev 11) */
138 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
139 /* Determined by CoreControl bit */
140 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
141 /* Force backplane reset */
142 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
143 /* Force no backplane reset */
144 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
145
146 /* direct(mapped) cis space */
147
148 /* MAPPED common CIS address */
149 #define SBSDIO_CIS_BASE_COMMON 0x1000
150 /* maximum bytes in one CIS */
151 #define SBSDIO_CIS_SIZE_LIMIT 0x200
152 /* cis offset addr is < 17 bits */
153 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
154
155 /* manfid tuple length, include tuple, link bytes */
156 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
157
158 /* intstatus */
159 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
160 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
161 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
162 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
163 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
164 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
165 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
166 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
167 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
168 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
169 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
170 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
171 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
172 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
173 #define I_PC (1 << 10) /* descriptor error */
174 #define I_PD (1 << 11) /* data error */
175 #define I_DE (1 << 12) /* Descriptor protocol Error */
176 #define I_RU (1 << 13) /* Receive descriptor Underflow */
177 #define I_RO (1 << 14) /* Receive fifo Overflow */
178 #define I_XU (1 << 15) /* Transmit fifo Underflow */
179 #define I_RI (1 << 16) /* Receive Interrupt */
180 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
181 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
182 #define I_XI (1 << 24) /* Transmit Interrupt */
183 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
184 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
185 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
186 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
187 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
188 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
189 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
190 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
191 #define I_DMA (I_RI | I_XI | I_ERRORS)
192
193 /* corecontrol */
194 #define CC_CISRDY (1 << 0) /* CIS Ready */
195 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
196 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
197 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
198 #define CC_XMTDATAAVAIL_MODE (1 << 4)
199 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
200
201 /* SDA_FRAMECTRL */
202 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
203 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
204 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
205 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
206
207 /*
208 * Software allocation of To SB Mailbox resources
209 */
210
211 /* tosbmailbox bits corresponding to intstatus bits */
212 #define SMB_NAK (1 << 0) /* Frame NAK */
213 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
214 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
215 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
216
217 /* tosbmailboxdata */
218 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
219
220 /*
221 * Software allocation of To Host Mailbox resources
222 */
223
224 /* intstatus bits */
225 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
226 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
227 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
228 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
229
230 /* tohostmailboxdata */
231 #define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
232 #define HMB_DATA_DEVREADY 2 /* talk to host after enable */
233 #define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
234 #define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
235
236 #define HMB_DATA_FCDATA_MASK 0xff000000
237 #define HMB_DATA_FCDATA_SHIFT 24
238
239 #define HMB_DATA_VERSION_MASK 0x00ff0000
240 #define HMB_DATA_VERSION_SHIFT 16
241
242 /*
243 * Software-defined protocol header
244 */
245
246 /* Current protocol version */
247 #define SDPCM_PROT_VERSION 4
248
249 /*
250 * Shared structure between dongle and the host.
251 * The structure contains pointers to trap or assert information.
252 */
253 #define SDPCM_SHARED_VERSION 0x0003
254 #define SDPCM_SHARED_VERSION_MASK 0x00FF
255 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
256 #define SDPCM_SHARED_ASSERT 0x0200
257 #define SDPCM_SHARED_TRAP 0x0400
258
259 /* Space for header read, limit for data packets */
260 #define MAX_HDR_READ (1 << 6)
261 #define MAX_RX_DATASZ 2048
262
263 /* Bump up limit on waiting for HT to account for first startup;
264 * if the image is doing a CRC calculation before programming the PMU
265 * for HT availability, it could take a couple hundred ms more, so
266 * max out at 1 second (1000000 us).
267 */
268 #undef PMU_MAX_TRANSITION_DLY
269 #define PMU_MAX_TRANSITION_DLY 1000000
270
271 /* Value for ChipClockCSR during initial setup */
272 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
273 SBSDIO_ALP_AVAIL_REQ)
274
275 /* Flags for SDH calls */
276 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
277
278 #define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
279 #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
280 * when idle
281 */
282 #define BRCMF_IDLE_INTERVAL 1
283
284 #define KSO_WAIT_US 50
285 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
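/* With the values above this works out to 1000000 / 50 = 20000 attempts,
 * i.e. roughly one second of KSO_WAIT_US-spaced retries in
 * brcmf_sdbrcm_kso_control() below.
 */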
286
287 /*
288 * Conversion of 802.1D priority to precedence level
289 */
290 static uint prio2prec(u32 prio)
291 {
292 return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
293 (prio^2) : prio;
294 }
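/* Worked example, assuming the brcmu_wifi.h values PRIO_8021D_BE == 0 and
 * PRIO_8021D_NONE == 2: prio 0 maps to precedence 2 and prio 2 maps to
 * precedence 0 (XOR with 2 swaps them), while every other priority maps to
 * itself, so best-effort traffic sorts above priorities 1 and 2.
 */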
295
296 #ifdef DEBUG
297 /* Device console log buffer state */
298 struct brcmf_console {
299 uint count; /* Poll interval msec counter */
300 uint log_addr; /* Log struct address (fixed) */
301 struct rte_log_le log_le; /* Log struct (host copy) */
302 uint bufsize; /* Size of log buffer */
303 u8 *buf; /* Log buffer (host copy) */
304 uint last; /* Last buffer read index */
305 };
306
307 struct brcmf_trap_info {
308 __le32 type;
309 __le32 epc;
310 __le32 cpsr;
311 __le32 spsr;
312 __le32 r0; /* a1 */
313 __le32 r1; /* a2 */
314 __le32 r2; /* a3 */
315 __le32 r3; /* a4 */
316 __le32 r4; /* v1 */
317 __le32 r5; /* v2 */
318 __le32 r6; /* v3 */
319 __le32 r7; /* v4 */
320 __le32 r8; /* v5 */
321 __le32 r9; /* sb/v6 */
322 __le32 r10; /* sl/v7 */
323 __le32 r11; /* fp/v8 */
324 __le32 r12; /* ip */
325 __le32 r13; /* sp */
326 __le32 r14; /* lr */
327 __le32 pc; /* r15 */
328 };
329 #endif /* DEBUG */
330
331 struct sdpcm_shared {
332 u32 flags;
333 u32 trap_addr;
334 u32 assert_exp_addr;
335 u32 assert_file_addr;
336 u32 assert_line;
337 u32 console_addr; /* Address of struct rte_console */
338 u32 msgtrace_addr;
339 u8 tag[32];
340 u32 brpt_addr;
341 };
342
343 struct sdpcm_shared_le {
344 __le32 flags;
345 __le32 trap_addr;
346 __le32 assert_exp_addr;
347 __le32 assert_file_addr;
348 __le32 assert_line;
349 __le32 console_addr; /* Address of struct rte_console */
350 __le32 msgtrace_addr;
351 u8 tag[32];
352 __le32 brpt_addr;
353 };
354
355 /* dongle SDIO bus specific header info */
356 struct brcmf_sdio_hdrinfo {
357 u8 seq_num;
358 u8 channel;
359 u16 len;
360 u16 len_left;
361 u16 len_nxtfrm;
362 u8 dat_offset;
363 bool lastfrm;
364 u16 tail_pad;
365 };
366
367 /* misc chip info needed by some of the routines */
368 /* Private data for SDIO bus interaction */
369 struct brcmf_sdio {
370 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
371 struct chip_info *ci; /* Chip info struct */
372 char *vars; /* Variables (from CIS and/or other) */
373 uint varsz; /* Size of variables buffer */
374
375 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
376
377 u32 hostintmask; /* Copy of Host Interrupt Mask */
378 atomic_t intstatus; /* Intstatus bits (events) pending */
379 atomic_t fcstate; /* State of dongle flow-control */
380
381 uint blocksize; /* Block size of SDIO transfers */
382 uint roundup; /* Max roundup limit */
383
384 struct pktq txq; /* Queue length used for flow-control */
385 u8 flowcontrol; /* per prio flow control bitmask */
386 u8 tx_seq; /* Transmit sequence number (next) */
387 u8 tx_max; /* Maximum transmit sequence allowed */
388
389 u8 *hdrbuf; /* buffer for handling rx frame */
390 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
391 u8 rx_seq; /* Receive sequence number (expected) */
392 struct brcmf_sdio_hdrinfo cur_read;
393 /* info of current read frame */
394 bool rxskip; /* Skip receive (awaiting NAK ACK) */
395 bool rxpending; /* Data frame pending in dongle */
396
397 uint rxbound; /* Rx frames to read before resched */
398 uint txbound; /* Tx frames to send before resched */
399 uint txminmax;
400
401 struct sk_buff *glomd; /* Packet containing glomming descriptor */
402 struct sk_buff_head glom; /* Packet list for glommed superframe */
403 uint glomerr; /* Glom packet read errors */
404
405 u8 *rxbuf; /* Buffer for receiving control packets */
406 uint rxblen; /* Allocated length of rxbuf */
407 u8 *rxctl; /* Aligned pointer into rxbuf */
408 u8 *rxctl_orig; /* pointer for freeing rxctl */
409 uint rxlen; /* Length of valid data in buffer */
410 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
411
412 u8 sdpcm_ver; /* Bus protocol reported by dongle */
413
414 bool intr; /* Use interrupts */
415 bool poll; /* Use polling */
416 atomic_t ipend; /* Device interrupt is pending */
417 uint spurious; /* Count of spurious interrupts */
418 uint pollrate; /* Ticks between device polls */
419 uint polltick; /* Tick counter */
420
421 #ifdef DEBUG
422 uint console_interval;
423 struct brcmf_console console; /* Console output polling support */
424 uint console_addr; /* Console address from shared struct */
425 #endif /* DEBUG */
426
427 uint clkstate; /* State of sd and backplane clock(s) */
428 bool activity; /* Activity flag for clock down */
429 s32 idletime; /* Control for activity timeout */
430 s32 idlecount; /* Activity timeout counter */
431 s32 idleclock; /* How to set bus driver when idle */
432 bool rxflow_mode; /* Rx flow control mode */
433 bool rxflow; /* Is rx flow control on */
434 bool alp_only; /* Don't use HT clock (ALP only) */
435
436 u8 *ctrl_frame_buf;
437 u32 ctrl_frame_len;
438 bool ctrl_frame_stat;
439
440 spinlock_t txqlock;
441 wait_queue_head_t ctrl_wait;
442 wait_queue_head_t dcmd_resp_wait;
443
444 struct timer_list timer;
445 struct completion watchdog_wait;
446 struct task_struct *watchdog_tsk;
447 bool wd_timer_valid;
448 uint save_ms;
449
450 struct workqueue_struct *brcmf_wq;
451 struct work_struct datawork;
452 atomic_t dpc_tskcnt;
453
454 bool txoff; /* Transmit flow-controlled */
455 struct brcmf_sdio_count sdcnt;
456 bool sr_enabled; /* SaveRestore enabled */
457 bool sleeping; /* SDIO bus sleeping */
458
459 u8 tx_hdrlen; /* sdio bus header length for tx packet */
460 bool txglom; /* host tx glomming enable flag */
461 struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */
462 u16 head_align; /* buffer pointer alignment */
463 u16 sgentry_align; /* scatter-gather buffer alignment */
464 };
465
466 /* clkstate */
467 #define CLK_NONE 0
468 #define CLK_SDONLY 1
469 #define CLK_PENDING 2
470 #define CLK_AVAIL 3
471
472 #ifdef DEBUG
473 static int qcount[NUMPRIO];
474 #endif /* DEBUG */
475
476 #define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
477
478 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
479
480 /* Retry count for register access failures */
481 static const uint retry_limit = 2;
482
483 /* Limit on rounding up frames */
484 static const uint max_roundup = 512;
485
486 #define ALIGNMENT 4
487
488 static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
489 module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
490 MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
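/* The glom size can be tuned at load time, e.g. (illustrative invocation):
 *
 *   modprobe brcmfmac txglomsz=16
 */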
491
492 enum brcmf_sdio_frmtype {
493 BRCMF_SDIO_FT_NORMAL,
494 BRCMF_SDIO_FT_SUPER,
495 BRCMF_SDIO_FT_SUB,
496 };
497
498 #define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
499 #define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
500 #define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
501 #define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
502 #define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
503 #define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
504 #define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
505 #define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
506 #define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
507 #define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
508 #define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
509 #define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
510 #define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
511 #define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
512 #define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
513 #define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
514
515 MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
516 MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
517 MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
518 MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
519 MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
520 MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
521 MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
522 MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
523 MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
524 MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
525 MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
526 MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
527 MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
528 MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
529 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
530 MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
531
532 struct brcmf_firmware_names {
533 u32 chipid;
534 u32 revmsk;
535 const char *bin;
536 const char *nv;
537 };
538
539 enum brcmf_firmware_type {
540 BRCMF_FIRMWARE_BIN,
541 BRCMF_FIRMWARE_NVRAM
542 };
543
544 #define BRCMF_FIRMWARE_NVRAM(name) \
545 name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
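/* For example, BRCMF_FIRMWARE_NVRAM(BCM4330) in the table below expands to
 *
 *   "brcm/brcmfmac4330-sdio.bin", "brcm/brcmfmac4330-sdio.txt"
 *
 * i.e. the .bin and .nv members of struct brcmf_firmware_names.
 */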
546
547 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
548 { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
549 { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
550 { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
551 { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
552 { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
553 { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
554 { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
555 { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
556 };
557
558
559 static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
560 enum brcmf_firmware_type type)
561 {
562 const struct firmware *fw;
563 const char *name;
564 int err, i;
565
566 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
567 if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
568 brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
569 switch (type) {
570 case BRCMF_FIRMWARE_BIN:
571 name = brcmf_fwname_data[i].bin;
572 break;
573 case BRCMF_FIRMWARE_NVRAM:
574 name = brcmf_fwname_data[i].nv;
575 break;
576 default:
577 brcmf_err("invalid firmware type (%d)\n", type);
578 return NULL;
579 }
580 goto found;
581 }
582 }
583 brcmf_err("Unknown chipid %d [%d]\n",
584 bus->ci->chip, bus->ci->chiprev);
585 return NULL;
586
587 found:
588 err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
589 if ((err) || (!fw)) {
590 brcmf_err("fail to request firmware %s (%d)\n", name, err);
591 return NULL;
592 }
593
594 return fw;
595 }
596
597 static void pkt_align(struct sk_buff *p, int len, int align)
598 {
599 uint datalign;
600 datalign = (unsigned long)(p->data);
601 datalign = roundup(datalign, (align)) - datalign;
602 if (datalign)
603 skb_pull(p, datalign);
604 __skb_trim(p, len);
605 }
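/* Example of the alignment above: with align = 4 and p->data at an address
 * whose low two bits are 0x2, datalign = roundup(2, 4) - 2 = 2, so
 * skb_pull() advances the data pointer by two bytes before the buffer is
 * trimmed to len.
 */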
606
607 /* Check whether the dongle has offered any transmit window */
608 static bool data_ok(struct brcmf_sdio *bus)
609 {
610 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
611 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
612 }
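/* Worked example of the modulo-256 window check above: with tx_seq = 250
 * and tx_max = 4, (u8)(tx_max - tx_seq) = 10, so up to 10 more frames may
 * be sent. A zero difference means the window is closed, and a set bit 7
 * (difference >= 0x80) is treated as a bogus window where tx_max lags
 * tx_seq, so no data is sent either.
 */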
613
614 /*
615 * Reads a register in the SDIO hardware block. This block occupies a series of
616 * addresses on the 32-bit backplane bus.
617 */
618 static int
619 r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
620 {
621 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
622 int ret;
623
624 *regvar = brcmf_sdio_regrl(bus->sdiodev,
625 bus->ci->c_inf[idx].base + offset, &ret);
626
627 return ret;
628 }
629
630 static int
631 w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
632 {
633 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
634 int ret;
635
636 brcmf_sdio_regwl(bus->sdiodev,
637 bus->ci->c_inf[idx].base + reg_offset,
638 regval, &ret);
639
640 return ret;
641 }
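/* Typical usage, as in brcmf_sdbrcm_hostmail() further down:
 *
 *   ret = r_sdreg32(bus, &hmb_data,
 *                   offsetof(struct sdpcmd_regs, tohostmailboxdata));
 *
 * i.e. callers pass the register's offset within the SDIO core register
 * block and the helpers add the core's backplane base address.
 */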
642
643 static int
644 brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
645 {
646 u8 wr_val = 0, rd_val, cmp_val, bmask;
647 int err = 0;
648 int try_cnt = 0;
649
650 brcmf_dbg(TRACE, "Enter\n");
651
652 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
653 /* 1st KSO write goes to AOS wake up core if device is asleep */
654 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
655 wr_val, &err);
656 if (err) {
657 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
658 return err;
659 }
660
661 if (on) {
662 /* device WAKEUP through KSO:
663 * write bit 0 & read back until
664 * both bits 0 (kso bit) & 1 (dev on status) are set
665 */
666 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
667 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
668 bmask = cmp_val;
669 usleep_range(2000, 3000);
670 } else {
671 /* Put device to sleep, turn off KSO */
672 cmp_val = 0;
673 /* only check for bit0, bit1(dev on status) may not
674 * get cleared right away
675 */
676 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
677 }
678
679 do {
680 /* reliable KSO bit set/clr:
681 * the sdiod sleep write access is synced to PMU 32khz clk
682 * just one write attempt may fail,
683 * read it back until it matches written value
684 */
685 rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
686 &err);
687 if (((rd_val & bmask) == cmp_val) && !err)
688 break;
689 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
690 try_cnt, MAX_KSO_ATTEMPTS, err);
691 udelay(KSO_WAIT_US);
692 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
693 wr_val, &err);
694 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
695
696 return err;
697 }
698
699 #define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
700
701 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
702
703 /* Turn backplane clock on or off */
704 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
705 {
706 int err;
707 u8 clkctl, clkreq, devctl;
708 unsigned long timeout;
709
710 brcmf_dbg(SDIO, "Enter\n");
711
712 clkctl = 0;
713
714 if (bus->sr_enabled) {
715 bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
716 return 0;
717 }
718
719 if (on) {
720 /* Request HT Avail */
721 clkreq =
722 bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
723
724 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
725 clkreq, &err);
726 if (err) {
727 brcmf_err("HT Avail request error: %d\n", err);
728 return -EBADE;
729 }
730
731 /* Check current status */
732 clkctl = brcmf_sdio_regrb(bus->sdiodev,
733 SBSDIO_FUNC1_CHIPCLKCSR, &err);
734 if (err) {
735 brcmf_err("HT Avail read error: %d\n", err);
736 return -EBADE;
737 }
738
739 /* Go to pending and await interrupt if appropriate */
740 if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
741 /* Allow only clock-available interrupt */
742 devctl = brcmf_sdio_regrb(bus->sdiodev,
743 SBSDIO_DEVICE_CTL, &err);
744 if (err) {
745 brcmf_err("Devctl error setting CA: %d\n",
746 err);
747 return -EBADE;
748 }
749
750 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
751 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
752 devctl, &err);
753 brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
754 bus->clkstate = CLK_PENDING;
755
756 return 0;
757 } else if (bus->clkstate == CLK_PENDING) {
758 /* Cancel CA-only interrupt filter */
759 devctl = brcmf_sdio_regrb(bus->sdiodev,
760 SBSDIO_DEVICE_CTL, &err);
761 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
762 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
763 devctl, &err);
764 }
765
766 /* Otherwise, wait here (polling) for HT Avail */
767 timeout = jiffies +
768 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
769 while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
770 clkctl = brcmf_sdio_regrb(bus->sdiodev,
771 SBSDIO_FUNC1_CHIPCLKCSR,
772 &err);
773 if (time_after(jiffies, timeout))
774 break;
775 else
776 usleep_range(5000, 10000);
777 }
778 if (err) {
779 brcmf_err("HT Avail request error: %d\n", err);
780 return -EBADE;
781 }
782 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
783 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
784 PMU_MAX_TRANSITION_DLY, clkctl);
785 return -EBADE;
786 }
787
788 /* Mark clock available */
789 bus->clkstate = CLK_AVAIL;
790 brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
791
792 #if defined(DEBUG)
793 if (!bus->alp_only) {
794 if (SBSDIO_ALPONLY(clkctl))
795 brcmf_err("HT Clock should be on\n");
796 }
797 #endif /* defined (DEBUG) */
798
799 bus->activity = true;
800 } else {
801 clkreq = 0;
802
803 if (bus->clkstate == CLK_PENDING) {
804 /* Cancel CA-only interrupt filter */
805 devctl = brcmf_sdio_regrb(bus->sdiodev,
806 SBSDIO_DEVICE_CTL, &err);
807 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
808 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
809 devctl, &err);
810 }
811
812 bus->clkstate = CLK_SDONLY;
813 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
814 clkreq, &err);
815 brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
816 if (err) {
817 brcmf_err("Failed access turning clock off: %d\n",
818 err);
819 return -EBADE;
820 }
821 }
822 return 0;
823 }
824
825 /* Change idle/active SD state */
826 static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
827 {
828 brcmf_dbg(SDIO, "Enter\n");
829
830 if (on)
831 bus->clkstate = CLK_SDONLY;
832 else
833 bus->clkstate = CLK_NONE;
834
835 return 0;
836 }
837
838 /* Transition SD and backplane clock readiness */
839 static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
840 {
841 #ifdef DEBUG
842 uint oldstate = bus->clkstate;
843 #endif /* DEBUG */
844
845 brcmf_dbg(SDIO, "Enter\n");
846
847 /* Early exit if we're already there */
848 if (bus->clkstate == target) {
849 if (target == CLK_AVAIL) {
850 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
851 bus->activity = true;
852 }
853 return 0;
854 }
855
856 switch (target) {
857 case CLK_AVAIL:
858 /* Make sure SD clock is available */
859 if (bus->clkstate == CLK_NONE)
860 brcmf_sdbrcm_sdclk(bus, true);
861 /* Now request HT Avail on the backplane */
862 brcmf_sdbrcm_htclk(bus, true, pendok);
863 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
864 bus->activity = true;
865 break;
866
867 case CLK_SDONLY:
868 /* Remove HT request, or bring up SD clock */
869 if (bus->clkstate == CLK_NONE)
870 brcmf_sdbrcm_sdclk(bus, true);
871 else if (bus->clkstate == CLK_AVAIL)
872 brcmf_sdbrcm_htclk(bus, false, false);
873 else
874 brcmf_err("request for %d -> %d\n",
875 bus->clkstate, target);
876 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
877 break;
878
879 case CLK_NONE:
880 /* Make sure to remove HT request */
881 if (bus->clkstate == CLK_AVAIL)
882 brcmf_sdbrcm_htclk(bus, false, false);
883 /* Now remove the SD clock */
884 brcmf_sdbrcm_sdclk(bus, false);
885 brcmf_sdbrcm_wd_timer(bus, 0);
886 break;
887 }
888 #ifdef DEBUG
889 brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
890 #endif /* DEBUG */
891
892 return 0;
893 }
894
895 static int
896 brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
897 {
898 int err = 0;
899 brcmf_dbg(TRACE, "Enter\n");
900 brcmf_dbg(SDIO, "request %s currently %s\n",
901 (sleep ? "SLEEP" : "WAKE"),
902 (bus->sleeping ? "SLEEP" : "WAKE"));
903
904 /* If SR is enabled control bus state with KSO */
905 if (bus->sr_enabled) {
906 /* Done if we're already in the requested state */
907 if (sleep == bus->sleeping)
908 goto end;
909
910 /* Going to sleep */
911 if (sleep) {
912 /* Don't sleep if something is pending */
913 if (atomic_read(&bus->intstatus) ||
914 atomic_read(&bus->ipend) > 0 ||
915 (!atomic_read(&bus->fcstate) &&
916 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
917 data_ok(bus)))
918 return -EBUSY;
919 err = brcmf_sdbrcm_kso_control(bus, false);
920 /* disable watchdog */
921 if (!err)
922 brcmf_sdbrcm_wd_timer(bus, 0);
923 } else {
924 bus->idlecount = 0;
925 err = brcmf_sdbrcm_kso_control(bus, true);
926 }
927 if (!err) {
928 /* Change state */
929 bus->sleeping = sleep;
930 brcmf_dbg(SDIO, "new state %s\n",
931 (sleep ? "SLEEP" : "WAKE"));
932 } else {
933 brcmf_err("error while changing bus sleep state %d\n",
934 err);
935 return err;
936 }
937 }
938
939 end:
940 /* control clocks */
941 if (sleep) {
942 if (!bus->sr_enabled)
943 brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
944 } else {
945 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
946 }
947
948 return err;
949
950 }
951
952 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
953 {
954 u32 intstatus = 0;
955 u32 hmb_data;
956 u8 fcbits;
957 int ret;
958
959 brcmf_dbg(SDIO, "Enter\n");
960
961 /* Read mailbox data and ack that we did so */
962 ret = r_sdreg32(bus, &hmb_data,
963 offsetof(struct sdpcmd_regs, tohostmailboxdata));
964
965 if (ret == 0)
966 w_sdreg32(bus, SMB_INT_ACK,
967 offsetof(struct sdpcmd_regs, tosbmailbox));
968 bus->sdcnt.f1regdata += 2;
969
970 /* Dongle recomposed rx frames, accept them again */
971 if (hmb_data & HMB_DATA_NAKHANDLED) {
972 brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
973 bus->rx_seq);
974 if (!bus->rxskip)
975 brcmf_err("unexpected NAKHANDLED!\n");
976
977 bus->rxskip = false;
978 intstatus |= I_HMB_FRAME_IND;
979 }
980
981 /*
982 * DEVREADY does not occur with gSPI.
983 */
984 if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
985 bus->sdpcm_ver =
986 (hmb_data & HMB_DATA_VERSION_MASK) >>
987 HMB_DATA_VERSION_SHIFT;
988 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
989 brcmf_err("Version mismatch, dongle reports %d, "
990 "expecting %d\n",
991 bus->sdpcm_ver, SDPCM_PROT_VERSION);
992 else
993 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
994 bus->sdpcm_ver);
995 }
996
997 /*
998 * Flow control has been moved into the RX headers and this out-of-band
999 * method isn't used any more. Handle it here only to remain backward
1000 * compatible with older dongles.
1001 */
1002 if (hmb_data & HMB_DATA_FC) {
1003 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
1004 HMB_DATA_FCDATA_SHIFT;
1005
1006 if (fcbits & ~bus->flowcontrol)
1007 bus->sdcnt.fc_xoff++;
1008
1009 if (bus->flowcontrol & ~fcbits)
1010 bus->sdcnt.fc_xon++;
1011
1012 bus->sdcnt.fc_rcvd++;
1013 bus->flowcontrol = fcbits;
1014 }
1015
1016 /* Shouldn't be any others */
1017 if (hmb_data & ~(HMB_DATA_DEVREADY |
1018 HMB_DATA_NAKHANDLED |
1019 HMB_DATA_FC |
1020 HMB_DATA_FWREADY |
1021 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1022 brcmf_err("Unknown mailbox data content: 0x%02x\n",
1023 hmb_data);
1024
1025 return intstatus;
1026 }
1027
1028 static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1029 {
1030 uint retries = 0;
1031 u16 lastrbc;
1032 u8 hi, lo;
1033 int err;
1034
1035 brcmf_err("%sterminate frame%s\n",
1036 abort ? "abort command, " : "",
1037 rtx ? ", send NAK" : "");
1038
1039 if (abort)
1040 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
1041
1042 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1043 SFC_RF_TERM, &err);
1044 bus->sdcnt.f1regdata++;
1045
1046 /* Wait until the packet has been flushed (device/FIFO stable) */
1047 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1048 hi = brcmf_sdio_regrb(bus->sdiodev,
1049 SBSDIO_FUNC1_RFRAMEBCHI, &err);
1050 lo = brcmf_sdio_regrb(bus->sdiodev,
1051 SBSDIO_FUNC1_RFRAMEBCLO, &err);
1052 bus->sdcnt.f1regdata += 2;
1053
1054 if ((hi == 0) && (lo == 0))
1055 break;
1056
1057 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1058 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1059 lastrbc, (hi << 8) + lo);
1060 }
1061 lastrbc = (hi << 8) + lo;
1062 }
1063
1064 if (!retries)
1065 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1066 else
1067 brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1068
1069 if (rtx) {
1070 bus->sdcnt.rxrtx++;
1071 err = w_sdreg32(bus, SMB_NAK,
1072 offsetof(struct sdpcmd_regs, tosbmailbox));
1073
1074 bus->sdcnt.f1regdata++;
1075 if (err == 0)
1076 bus->rxskip = true;
1077 }
1078
1079 /* Clear partial in any case */
1080 bus->cur_read.len = 0;
1081
1082 /* If we can't reach the device, signal failure */
1083 if (err)
1084 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
1085 }
1086
1087 /* return total length of buffer chain */
1088 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1089 {
1090 struct sk_buff *p;
1091 uint total;
1092
1093 total = 0;
1094 skb_queue_walk(&bus->glom, p)
1095 total += p->len;
1096 return total;
1097 }
1098
1099 static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1100 {
1101 struct sk_buff *cur, *next;
1102
1103 skb_queue_walk_safe(&bus->glom, cur, next) {
1104 skb_unlink(cur, &bus->glom);
1105 brcmu_pkt_buf_free_skb(cur);
1106 }
1107 }
1108
1109 /**
1110 * brcmfmac sdio bus specific header
1111 * This is the lowest layer header wrapped on the packets transmitted between
1112 * host and WiFi dongle which contains information needed for SDIO core and
1113 * firmware
1114 *
1115 * It consists of 3 parts: hardware header, hardware extension header and
1116 * software header
1117 * hardware header (frame tag) - 4 bytes
1118 * Byte 0~1: Frame length
1119 * Byte 2~3: Checksum, bit-wise inverse of frame length
1120 * hardware extension header - 8 bytes
1121 * Tx glom mode only, N/A for Rx or normal Tx
1122 * Byte 0~1: Packet length excluding hw frame tag
1123 * Byte 2: Reserved
1124 * Byte 3: Frame flags, bit 0: last frame indication
1125 * Byte 4~5: Reserved
1126 * Byte 6~7: Tail padding length
1127 * software header - 8 bytes
1128 * Byte 0: Rx/Tx sequence number
1129 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1130 * Byte 2: Length of next data frame, reserved for Tx
1131 * Byte 3: Data offset
1132 * Byte 4: Flow control bits, reserved for Tx
1133 * Byte 5: Maximum sequence number allowed by firmware for Tx (Rx direction only), N/A for Tx packets
1134 * Byte 6~7: Reserved
1135 */
1136 #define SDPCM_HWHDR_LEN 4
1137 #define SDPCM_HWEXT_LEN 8
1138 #define SDPCM_SWHDR_LEN 8
1139 #define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1140 /* software header */
1141 #define SDPCM_SEQ_MASK 0x000000ff
1142 #define SDPCM_SEQ_WRAP 256
1143 #define SDPCM_CHANNEL_MASK 0x00000f00
1144 #define SDPCM_CHANNEL_SHIFT 8
1145 #define SDPCM_CONTROL_CHANNEL 0 /* Control */
1146 #define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
1147 #define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
1148 #define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
1149 #define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
1150 #define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
1151 #define SDPCM_NEXTLEN_MASK 0x00ff0000
1152 #define SDPCM_NEXTLEN_SHIFT 16
1153 #define SDPCM_DOFFSET_MASK 0xff000000
1154 #define SDPCM_DOFFSET_SHIFT 24
1155 #define SDPCM_FCMASK_MASK 0x000000ff
1156 #define SDPCM_WINDOW_MASK 0x0000ff00
1157 #define SDPCM_WINDOW_SHIFT 8
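/* Worked decode of the first software-header word using the masks above
 * (illustrative value): swheader = 0x0c040205 gives sequence number 0x05,
 * channel 2 (data), next-frame length 0x04 << 4 = 64 bytes and a data
 * offset of 0x0c = 12 bytes. The second word carries the per-priority flow
 * control bitmap in bits 0-7 and the transmit window (tx_max) in bits 8-15.
 */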
1158
1159 static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1160 {
1161 u32 hdrvalue;
1162 hdrvalue = *(u32 *)swheader;
1163 return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1164 }
1165
1166 static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1167 struct brcmf_sdio_hdrinfo *rd,
1168 enum brcmf_sdio_frmtype type)
1169 {
1170 u16 len, checksum;
1171 u8 rx_seq, fc, tx_seq_max;
1172 u32 swheader;
1173
1174 trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
1175
1176 /* hw header */
1177 len = get_unaligned_le16(header);
1178 checksum = get_unaligned_le16(header + sizeof(u16));
1179 /* All zero means no more to read */
1180 if (!(len | checksum)) {
1181 bus->rxpending = false;
1182 return -ENODATA;
1183 }
1184 if ((u16)(~(len ^ checksum))) {
1185 brcmf_err("HW header checksum error\n");
1186 bus->sdcnt.rx_badhdr++;
1187 brcmf_sdbrcm_rxfail(bus, false, false);
1188 return -EIO;
1189 }
1190 if (len < SDPCM_HDRLEN) {
1191 brcmf_err("HW header length error\n");
1192 return -EPROTO;
1193 }
1194 if (type == BRCMF_SDIO_FT_SUPER &&
1195 (roundup(len, bus->blocksize) != rd->len)) {
1196 brcmf_err("HW superframe header length error\n");
1197 return -EPROTO;
1198 }
1199 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1200 brcmf_err("HW subframe header length error\n");
1201 return -EPROTO;
1202 }
1203 rd->len = len;
1204
1205 /* software header */
1206 header += SDPCM_HWHDR_LEN;
1207 swheader = le32_to_cpu(*(__le32 *)header);
1208 if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1209 brcmf_err("Glom descriptor found in superframe head\n");
1210 rd->len = 0;
1211 return -EINVAL;
1212 }
1213 rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1214 rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1215 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1216 type != BRCMF_SDIO_FT_SUPER) {
1217 brcmf_err("HW header length too long\n");
1218 bus->sdcnt.rx_toolong++;
1219 brcmf_sdbrcm_rxfail(bus, false, false);
1220 rd->len = 0;
1221 return -EPROTO;
1222 }
1223 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1224 brcmf_err("Wrong channel for superframe\n");
1225 rd->len = 0;
1226 return -EINVAL;
1227 }
1228 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1229 rd->channel != SDPCM_EVENT_CHANNEL) {
1230 brcmf_err("Wrong channel for subframe\n");
1231 rd->len = 0;
1232 return -EINVAL;
1233 }
1234 rd->dat_offset = brcmf_sdio_getdatoffset(header);
1235 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1236 brcmf_err("seq %d: bad data offset\n", rx_seq);
1237 bus->sdcnt.rx_badhdr++;
1238 brcmf_sdbrcm_rxfail(bus, false, false);
1239 rd->len = 0;
1240 return -ENXIO;
1241 }
1242 if (rd->seq_num != rx_seq) {
1243 brcmf_err("seq %d: sequence number error, expect %d\n",
1244 rx_seq, rd->seq_num);
1245 bus->sdcnt.rx_badseq++;
1246 rd->seq_num = rx_seq;
1247 }
1248 /* no need to check the rest of the header for subframes */
1249 if (type == BRCMF_SDIO_FT_SUB)
1250 return 0;
1251 rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1252 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1253 /* only warn for non-glom packets */
1254 if (rd->channel != SDPCM_GLOM_CHANNEL)
1255 brcmf_err("seq %d: next length error\n", rx_seq);
1256 rd->len_nxtfrm = 0;
1257 }
1258 swheader = le32_to_cpu(*(__le32 *)(header + 4));
1259 fc = swheader & SDPCM_FCMASK_MASK;
1260 if (bus->flowcontrol != fc) {
1261 if (~bus->flowcontrol & fc)
1262 bus->sdcnt.fc_xoff++;
1263 if (bus->flowcontrol & ~fc)
1264 bus->sdcnt.fc_xon++;
1265 bus->sdcnt.fc_rcvd++;
1266 bus->flowcontrol = fc;
1267 }
1268 tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1269 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1270 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1271 tx_seq_max = bus->tx_seq + 2;
1272 }
1273 bus->tx_max = tx_seq_max;
1274
1275 return 0;
1276 }
1277
1278 static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1279 {
1280 *(__le16 *)header = cpu_to_le16(frm_length);
1281 *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1282 }
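/* Worked example: frm_length = 500 (0x01f4) yields the little-endian header
 * bytes f4 01 0b fe, since ~0x01f4 = 0xfe0b. On receive,
 * brcmf_sdio_hdparse() verifies this by checking that
 * (u16)~(len ^ checksum) == 0, which holds because 0x01f4 ^ 0xfe0b == 0xffff.
 */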
1283
1284 static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1285 struct brcmf_sdio_hdrinfo *hd_info)
1286 {
1287 u32 hdrval;
1288 u8 hdr_offset;
1289
1290 brcmf_sdio_update_hwhdr(header, hd_info->len);
1291 hdr_offset = SDPCM_HWHDR_LEN;
1292
1293 if (bus->txglom) {
1294 hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
1295 *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1296 hdrval = (u16)hd_info->tail_pad << 16;
1297 *(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
1298 hdr_offset += SDPCM_HWEXT_LEN;
1299 }
1300
1301 hdrval = hd_info->seq_num;
1302 hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1303 SDPCM_CHANNEL_MASK;
1304 hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1305 SDPCM_DOFFSET_MASK;
1306 *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1307 *(((__le32 *)(header + hdr_offset)) + 1) = 0;
1308 trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
1309 }
1310
1311 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1312 {
1313 u16 dlen, totlen;
1314 u8 *dptr, num = 0;
1315 u16 sublen;
1316 struct sk_buff *pfirst, *pnext;
1317
1318 int errcode;
1319 u8 doff, sfdoff;
1320
1321 struct brcmf_sdio_hdrinfo rd_new;
1322
1323 /* If packets, issue read(s) and send up packet chain */
1324 /* Return sequence numbers consumed? */
1325
1326 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1327 bus->glomd, skb_peek(&bus->glom));
1328
1329 /* If there's a descriptor, generate the packet chain */
1330 if (bus->glomd) {
1331 pfirst = pnext = NULL;
1332 dlen = (u16) (bus->glomd->len);
1333 dptr = bus->glomd->data;
1334 if (!dlen || (dlen & 1)) {
1335 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1336 dlen);
1337 dlen = 0;
1338 }
1339
1340 for (totlen = num = 0; dlen; num++) {
1341 /* Get (and move past) next length */
1342 sublen = get_unaligned_le16(dptr);
1343 dlen -= sizeof(u16);
1344 dptr += sizeof(u16);
1345 if ((sublen < SDPCM_HDRLEN) ||
1346 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1347 brcmf_err("descriptor len %d bad: %d\n",
1348 num, sublen);
1349 pnext = NULL;
1350 break;
1351 }
1352 if (sublen % bus->sgentry_align) {
1353 brcmf_err("sublen %d not multiple of %d\n",
1354 sublen, bus->sgentry_align);
1355 }
1356 totlen += sublen;
1357
1358 /* For last frame, adjust read len so total
1359 is a block multiple */
1360 if (!dlen) {
1361 sublen +=
1362 (roundup(totlen, bus->blocksize) - totlen);
1363 totlen = roundup(totlen, bus->blocksize);
1364 }
1365
1366 /* Allocate/chain packet for next subframe */
1367 pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
1368 if (pnext == NULL) {
1369 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1370 num, sublen);
1371 break;
1372 }
1373 skb_queue_tail(&bus->glom, pnext);
1374
1375 /* Adhere to start alignment requirements */
1376 pkt_align(pnext, sublen, bus->sgentry_align);
1377 }
1378
1379 /* If all allocations succeeded, save packet chain
1380 in bus structure */
1381 if (pnext) {
1382 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1383 totlen, num);
1384 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1385 totlen != bus->cur_read.len) {
1386 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1387 bus->cur_read.len, totlen, rxseq);
1388 }
1389 pfirst = pnext = NULL;
1390 } else {
1391 brcmf_sdbrcm_free_glom(bus);
1392 num = 0;
1393 }
1394
1395 /* Done with descriptor packet */
1396 brcmu_pkt_buf_free_skb(bus->glomd);
1397 bus->glomd = NULL;
1398 bus->cur_read.len = 0;
1399 }
1400
1401 /* Ok -- either we just generated a packet chain,
1402 or had one from before */
1403 if (!skb_queue_empty(&bus->glom)) {
1404 if (BRCMF_GLOM_ON()) {
1405 brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1406 skb_queue_walk(&bus->glom, pnext) {
1407 brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
1408 pnext, (u8 *) (pnext->data),
1409 pnext->len, pnext->len);
1410 }
1411 }
1412
1413 pfirst = skb_peek(&bus->glom);
1414 dlen = (u16) brcmf_sdbrcm_glom_len(bus);
1415
1416 /* Do an SDIO read for the superframe. Configurable iovar to
1417 * read directly into the chained packet, or allocate a large
1418 * packet and copy into the chain.
1419 */
1420 sdio_claim_host(bus->sdiodev->func[1]);
1421 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1422 bus->sdiodev->sbwad,
1423 SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
1424 sdio_release_host(bus->sdiodev->func[1]);
1425 bus->sdcnt.f2rxdata++;
1426
1427 /* On failure, kill the superframe, allow a couple retries */
1428 if (errcode < 0) {
1429 brcmf_err("glom read of %d bytes failed: %d\n",
1430 dlen, errcode);
1431
1432 sdio_claim_host(bus->sdiodev->func[1]);
1433 if (bus->glomerr++ < 3) {
1434 brcmf_sdbrcm_rxfail(bus, true, true);
1435 } else {
1436 bus->glomerr = 0;
1437 brcmf_sdbrcm_rxfail(bus, true, false);
1438 bus->sdcnt.rxglomfail++;
1439 brcmf_sdbrcm_free_glom(bus);
1440 }
1441 sdio_release_host(bus->sdiodev->func[1]);
1442 return 0;
1443 }
1444
1445 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1446 pfirst->data, min_t(int, pfirst->len, 48),
1447 "SUPERFRAME:\n");
1448
1449 rd_new.seq_num = rxseq;
1450 rd_new.len = dlen;
1451 sdio_claim_host(bus->sdiodev->func[1]);
1452 errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1453 BRCMF_SDIO_FT_SUPER);
1454 sdio_release_host(bus->sdiodev->func[1]);
1455 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1456
1457 /* Remove superframe header, remember offset */
1458 skb_pull(pfirst, rd_new.dat_offset);
1459 sfdoff = rd_new.dat_offset;
1460 num = 0;
1461
1462 /* Validate all the subframe headers */
1463 skb_queue_walk(&bus->glom, pnext) {
1464 /* leave when invalid subframe is found */
1465 if (errcode)
1466 break;
1467
1468 rd_new.len = pnext->len;
1469 rd_new.seq_num = rxseq++;
1470 sdio_claim_host(bus->sdiodev->func[1]);
1471 errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1472 BRCMF_SDIO_FT_SUB);
1473 sdio_release_host(bus->sdiodev->func[1]);
1474 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1475 pnext->data, 32, "subframe:\n");
1476
1477 num++;
1478 }
1479
1480 if (errcode) {
1481 /* Terminate frame on error, request
1482 a couple retries */
1483 sdio_claim_host(bus->sdiodev->func[1]);
1484 if (bus->glomerr++ < 3) {
1485 /* Restore superframe header space */
1486 skb_push(pfirst, sfdoff);
1487 brcmf_sdbrcm_rxfail(bus, true, true);
1488 } else {
1489 bus->glomerr = 0;
1490 brcmf_sdbrcm_rxfail(bus, true, false);
1491 bus->sdcnt.rxglomfail++;
1492 brcmf_sdbrcm_free_glom(bus);
1493 }
1494 sdio_release_host(bus->sdiodev->func[1]);
1495 bus->cur_read.len = 0;
1496 return 0;
1497 }
1498
1499 /* Basic SD framing looks ok - process each packet (header) */
1500
1501 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1502 dptr = (u8 *) (pfirst->data);
1503 sublen = get_unaligned_le16(dptr);
1504 doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1505
1506 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1507 dptr, pfirst->len,
1508 "Rx Subframe Data:\n");
1509
1510 __skb_trim(pfirst, sublen);
1511 skb_pull(pfirst, doff);
1512
1513 if (pfirst->len == 0) {
1514 skb_unlink(pfirst, &bus->glom);
1515 brcmu_pkt_buf_free_skb(pfirst);
1516 continue;
1517 }
1518
1519 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1520 pfirst->data,
1521 min_t(int, pfirst->len, 32),
1522 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1523 bus->glom.qlen, pfirst, pfirst->data,
1524 pfirst->len, pfirst->next,
1525 pfirst->prev);
1526 skb_unlink(pfirst, &bus->glom);
1527 brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1528 bus->sdcnt.rxglompkts++;
1529 }
1530
1531 bus->sdcnt.rxglomframes++;
1532 }
1533 return num;
1534 }
1535
1536 static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1537 bool *pending)
1538 {
1539 DECLARE_WAITQUEUE(wait, current);
1540 int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1541
1542 /* Wait until control frame is available */
1543 add_wait_queue(&bus->dcmd_resp_wait, &wait);
1544 set_current_state(TASK_INTERRUPTIBLE);
1545
1546 while (!(*condition) && (!signal_pending(current) && timeout))
1547 timeout = schedule_timeout(timeout);
1548
1549 if (signal_pending(current))
1550 *pending = true;
1551
1552 set_current_state(TASK_RUNNING);
1553 remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1554
1555 return timeout;
1556 }
1557
1558 static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1559 {
1560 if (waitqueue_active(&bus->dcmd_resp_wait))
1561 wake_up_interruptible(&bus->dcmd_resp_wait);
1562
1563 return 0;
1564 }
1565 static void
1566 brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1567 {
1568 uint rdlen, pad;
1569 u8 *buf = NULL, *rbuf;
1570 int sdret;
1571
1572 brcmf_dbg(TRACE, "Enter\n");
1573
1574 if (bus->rxblen)
1575 buf = vzalloc(bus->rxblen);
1576 if (!buf)
1577 goto done;
1578
1579 rbuf = bus->rxbuf;
1580 pad = ((unsigned long)rbuf % bus->head_align);
1581 if (pad)
1582 rbuf += (bus->head_align - pad);
1583
1584 /* Copy the already-read portion over */
1585 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1586 if (len <= BRCMF_FIRSTREAD)
1587 goto gotpkt;
1588
1589 /* Raise rdlen to next SDIO block to avoid tail command */
1590 rdlen = len - BRCMF_FIRSTREAD;
1591 if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1592 pad = bus->blocksize - (rdlen % bus->blocksize);
1593 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1594 ((len + pad) < bus->sdiodev->bus_if->maxctl))
1595 rdlen += pad;
1596 } else if (rdlen % bus->head_align) {
1597 rdlen += bus->head_align - (rdlen % bus->head_align);
1598 }
1599
1600 /* Drop if the read is too big or it exceeds our maximum */
1601 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1602 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1603 rdlen, bus->sdiodev->bus_if->maxctl);
1604 brcmf_sdbrcm_rxfail(bus, false, false);
1605 goto done;
1606 }
1607
1608 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1609 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1610 len, len - doff, bus->sdiodev->bus_if->maxctl);
1611 bus->sdcnt.rx_toolong++;
1612 brcmf_sdbrcm_rxfail(bus, false, false);
1613 goto done;
1614 }
1615
1616 /* Read the remainder of the frame body */
1617 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1618 bus->sdiodev->sbwad,
1619 SDIO_FUNC_2,
1620 F2SYNC, rbuf, rdlen);
1621 bus->sdcnt.f2rxdata++;
1622
1623 /* Control frame failures need retransmission */
1624 if (sdret < 0) {
1625 brcmf_err("read %d control bytes failed: %d\n",
1626 rdlen, sdret);
1627 bus->sdcnt.rxc_errors++;
1628 brcmf_sdbrcm_rxfail(bus, true, true);
1629 goto done;
1630 } else
1631 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1632
1633 gotpkt:
1634
1635 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1636 buf, len, "RxCtrl:\n");
1637
1638 /* Point to valid data and indicate its length */
1639 spin_lock_bh(&bus->rxctl_lock);
1640 if (bus->rxctl) {
1641 brcmf_err("last control frame is being processed.\n");
1642 spin_unlock_bh(&bus->rxctl_lock);
1643 vfree(buf);
1644 goto done;
1645 }
1646 bus->rxctl = buf + doff;
1647 bus->rxctl_orig = buf;
1648 bus->rxlen = len - doff;
1649 spin_unlock_bh(&bus->rxctl_lock);
1650
1651 done:
1652 /* Awake any waiters */
1653 brcmf_sdbrcm_dcmd_resp_wake(bus);
1654 }
1655
1656 /* Pad read to blocksize for efficiency */
1657 static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1658 {
1659 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1660 *pad = bus->blocksize - (*rdlen % bus->blocksize);
1661 if (*pad <= bus->roundup && *pad < bus->blocksize &&
1662 *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1663 *rdlen += *pad;
1664 } else if (*rdlen % bus->head_align) {
1665 *rdlen += bus->head_align - (*rdlen % bus->head_align);
1666 }
1667 }
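/* Worked example, assuming blocksize = 512, bus->roundup = 512 and
 * head_align = 4: *rdlen = 600 gives *pad = 512 - (600 % 512) = 424; all
 * three conditions hold (424 <= 512, 424 < 512, 600 + 424 + 64 < 2048), so
 * the read is padded to two full blocks, *rdlen = 1024. A short read such
 * as 102 bytes is only rounded up to the 4-byte head alignment, i.e. 104.
 */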
1668
1669 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1670 {
1671 struct sk_buff *pkt; /* Packet for event or data frames */
1672 u16 pad; /* Number of pad bytes to read */
1673 uint rxleft = 0; /* Remaining number of frames allowed */
1674 int ret; /* Return code from calls */
1675 uint rxcount = 0; /* Total frames read */
1676 struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1677 u8 head_read = 0;
1678
1679 brcmf_dbg(TRACE, "Enter\n");
1680
1681 /* Not finished unless we encounter no more frames indication */
1682 bus->rxpending = true;
1683
1684 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1685 !bus->rxskip && rxleft &&
1686 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1687 rd->seq_num++, rxleft--) {
1688
1689 /* Handle glomming separately */
1690 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1691 u8 cnt;
1692 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1693 bus->glomd, skb_peek(&bus->glom));
1694 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1695 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1696 rd->seq_num += cnt - 1;
1697 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1698 continue;
1699 }
1700
1701 rd->len_left = rd->len;
1702 /* read header first for unknown frame length */
1703 sdio_claim_host(bus->sdiodev->func[1]);
1704 if (!rd->len) {
1705 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1706 bus->sdiodev->sbwad,
1707 SDIO_FUNC_2, F2SYNC,
1708 bus->rxhdr,
1709 BRCMF_FIRSTREAD);
1710 bus->sdcnt.f2rxhdrs++;
1711 if (ret < 0) {
1712 brcmf_err("RXHEADER FAILED: %d\n",
1713 ret);
1714 bus->sdcnt.rx_hdrfail++;
1715 brcmf_sdbrcm_rxfail(bus, true, true);
1716 sdio_release_host(bus->sdiodev->func[1]);
1717 continue;
1718 }
1719
1720 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1721 bus->rxhdr, SDPCM_HDRLEN,
1722 "RxHdr:\n");
1723
1724 if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1725 BRCMF_SDIO_FT_NORMAL)) {
1726 sdio_release_host(bus->sdiodev->func[1]);
1727 if (!bus->rxpending)
1728 break;
1729 else
1730 continue;
1731 }
1732
1733 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1734 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1735 rd->len,
1736 rd->dat_offset);
1737 /* prepare the descriptor for the next read */
1738 rd->len = rd->len_nxtfrm << 4;
1739 rd->len_nxtfrm = 0;
1740 /* treat all packets as events if we don't know */
1741 rd->channel = SDPCM_EVENT_CHANNEL;
1742 sdio_release_host(bus->sdiodev->func[1]);
1743 continue;
1744 }
1745 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1746 rd->len - BRCMF_FIRSTREAD : 0;
1747 head_read = BRCMF_FIRSTREAD;
1748 }
1749
1750 brcmf_pad(bus, &pad, &rd->len_left);
1751
1752 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1753 bus->head_align);
1754 if (!pkt) {
1755 /* Give up on data, request rtx of events */
1756 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1757 brcmf_sdbrcm_rxfail(bus, false,
1758 RETRYCHAN(rd->channel));
1759 sdio_release_host(bus->sdiodev->func[1]);
1760 continue;
1761 }
1762 skb_pull(pkt, head_read);
1763 pkt_align(pkt, rd->len_left, bus->head_align);
1764
1765 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1766 SDIO_FUNC_2, F2SYNC, pkt);
1767 bus->sdcnt.f2rxdata++;
1768 sdio_release_host(bus->sdiodev->func[1]);
1769
1770 if (ret < 0) {
1771 brcmf_err("read %d bytes from channel %d failed: %d\n",
1772 rd->len, rd->channel, ret);
1773 brcmu_pkt_buf_free_skb(pkt);
1774 sdio_claim_host(bus->sdiodev->func[1]);
1775 brcmf_sdbrcm_rxfail(bus, true,
1776 RETRYCHAN(rd->channel));
1777 sdio_release_host(bus->sdiodev->func[1]);
1778 continue;
1779 }
1780
1781 if (head_read) {
1782 skb_push(pkt, head_read);
1783 memcpy(pkt->data, bus->rxhdr, head_read);
1784 head_read = 0;
1785 } else {
1786 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1787 rd_new.seq_num = rd->seq_num;
1788 sdio_claim_host(bus->sdiodev->func[1]);
1789 if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
1790 BRCMF_SDIO_FT_NORMAL)) {
1791 rd->len = 0;
1792 brcmu_pkt_buf_free_skb(pkt);
1793 }
1794 bus->sdcnt.rx_readahead_cnt++;
1795 if (rd->len != roundup(rd_new.len, 16)) {
1796 brcmf_err("frame length mismatch:read %d, should be %d\n",
1797 rd->len,
1798 roundup(rd_new.len, 16) >> 4);
1799 rd->len = 0;
1800 brcmf_sdbrcm_rxfail(bus, true, true);
1801 sdio_release_host(bus->sdiodev->func[1]);
1802 brcmu_pkt_buf_free_skb(pkt);
1803 continue;
1804 }
1805 sdio_release_host(bus->sdiodev->func[1]);
1806 rd->len_nxtfrm = rd_new.len_nxtfrm;
1807 rd->channel = rd_new.channel;
1808 rd->dat_offset = rd_new.dat_offset;
1809
1810 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1811 BRCMF_DATA_ON()) &&
1812 BRCMF_HDRS_ON(),
1813 bus->rxhdr, SDPCM_HDRLEN,
1814 "RxHdr:\n");
1815
1816 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1817 brcmf_err("readahead on control packet %d?\n",
1818 rd_new.seq_num);
1819 /* Force retry w/normal header read */
1820 rd->len = 0;
1821 sdio_claim_host(bus->sdiodev->func[1]);
1822 brcmf_sdbrcm_rxfail(bus, false, true);
1823 sdio_release_host(bus->sdiodev->func[1]);
1824 brcmu_pkt_buf_free_skb(pkt);
1825 continue;
1826 }
1827 }
1828
1829 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1830 pkt->data, rd->len, "Rx Data:\n");
1831
1832 /* Save superframe descriptor and allocate packet frame */
1833 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1834 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
1835 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1836 rd->len);
1837 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1838 pkt->data, rd->len,
1839 "Glom Data:\n");
1840 __skb_trim(pkt, rd->len);
1841 skb_pull(pkt, SDPCM_HDRLEN);
1842 bus->glomd = pkt;
1843 } else {
1844 brcmf_err("%s: glom superframe w/o "
1845 "descriptor!\n", __func__);
1846 sdio_claim_host(bus->sdiodev->func[1]);
1847 brcmf_sdbrcm_rxfail(bus, false, false);
1848 sdio_release_host(bus->sdiodev->func[1]);
1849 }
1850 /* prepare the descriptor for the next read */
1851 rd->len = rd->len_nxtfrm << 4;
1852 rd->len_nxtfrm = 0;
1853 /* treat all packets as events if we don't know */
1854 rd->channel = SDPCM_EVENT_CHANNEL;
1855 continue;
1856 }
1857
1858 /* Fill in packet len and prio, deliver upward */
1859 __skb_trim(pkt, rd->len);
1860 skb_pull(pkt, rd->dat_offset);
1861
1862 /* prepare the descriptor for the next read */
1863 rd->len = rd->len_nxtfrm << 4;
1864 rd->len_nxtfrm = 0;
1865 /* treat all packets as events if we don't know */
1866 rd->channel = SDPCM_EVENT_CHANNEL;
1867
1868 if (pkt->len == 0) {
1869 brcmu_pkt_buf_free_skb(pkt);
1870 continue;
1871 }
1872
1873 brcmf_rx_frame(bus->sdiodev->dev, pkt);
1874 }
1875
1876 rxcount = maxframes - rxleft;
1877 /* Message if we hit the limit */
1878 if (!rxleft)
1879 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
1880 else
1881 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
1882 /* Back off rxseq if awaiting rtx, update rx_seq */
1883 if (bus->rxskip)
1884 rd->seq_num--;
1885 bus->rx_seq = rd->seq_num;
1886
1887 return rxcount;
1888 }
1889
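/*
 * Wake any thread waiting in brcmf_sdbrcm_bus_txctl() for the DPC to
 * transmit a pending control frame.
 */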
1890 static void
1891 brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1892 {
1893 if (waitqueue_active(&bus->ctrl_wait))
1894 wake_up_interruptible(&bus->ctrl_wait);
1895 return;
1896 }
1897
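/*
 * Align the packet data pointer to bus->head_align by reserving head
 * padding and zeroing the padded area plus header space. Returns the
 * number of padding bytes added, or -ENOMEM if the headroom could not
 * be grown.
 */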
1898 static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
1899 {
1900 u16 head_pad;
1901 u8 *dat_buf;
1902
1903 dat_buf = (u8 *)(pkt->data);
1904
1905 /* Check head padding */
1906 head_pad = ((unsigned long)dat_buf % bus->head_align);
1907 if (head_pad) {
1908 if (skb_headroom(pkt) < head_pad) {
1909 bus->sdiodev->bus_if->tx_realloc++;
1910 head_pad = 0;
1911 if (skb_cow(pkt, head_pad))
1912 return -ENOMEM;
1913 }
1914 skb_push(pkt, head_pad);
1915 dat_buf = (u8 *)(pkt->data);
1916 memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
1917 }
1918 return head_pad;
1919 }
1920
1921 /**
1922 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
1923 * bus layer usage.
1924 */
1925 /* flag marking a dummy skb added for DMA alignment requirement */
1926 #define ALIGN_SKB_FLAG 0x8000
1927 /* bit mask of data length chopped from the previous packet */
1928 #define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
1929
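/*
 * Pad the tail of a packet in a tx glom chain so each entry satisfies the
 * scatter-gather alignment and the last frame rounds up to the SDIO block
 * size. Either a dummy skb is appended (taking over the unaligned tail) or
 * the packet tail is extended in place. Returns the tail pad length or a
 * negative error code.
 */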
1930 static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
1931 struct sk_buff_head *pktq,
1932 struct sk_buff *pkt, u16 total_len)
1933 {
1934 struct brcmf_sdio_dev *sdiodev;
1935 struct sk_buff *pkt_pad;
1936 u16 tail_pad, tail_chop, chain_pad;
1937 unsigned int blksize;
1938 bool lastfrm;
1939 int ntail, ret;
1940
1941 sdiodev = bus->sdiodev;
1942 blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
1943 /* sg entry alignment should be a divisor of block size */
1944 WARN_ON(blksize % bus->sgentry_align);
1945
1946 /* Check tail padding */
1947 lastfrm = skb_queue_is_last(pktq, pkt);
1948 tail_pad = 0;
1949 tail_chop = pkt->len % bus->sgentry_align;
1950 if (tail_chop)
1951 tail_pad = bus->sgentry_align - tail_chop;
1952 chain_pad = (total_len + tail_pad) % blksize;
1953 if (lastfrm && chain_pad)
1954 tail_pad += blksize - chain_pad;
1955 if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
1956 pkt_pad = bus->txglom_sgpad;
1957 if (pkt_pad == NULL)
1958 pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
1959 if (pkt_pad == NULL)
1960 return -ENOMEM;
1961 ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
1962 if (unlikely(ret < 0))
1963 return ret;
1964 memcpy(pkt_pad->data,
1965 pkt->data + pkt->len - tail_chop,
1966 tail_chop);
1967 *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
1968 skb_trim(pkt, pkt->len - tail_chop);
1969 __skb_queue_after(pktq, pkt, pkt_pad);
1970 } else {
1971 ntail = pkt->data_len + tail_pad -
1972 (pkt->end - pkt->tail);
1973 if (skb_cloned(pkt) || ntail > 0)
1974 if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
1975 return -ENOMEM;
1976 if (skb_linearize(pkt))
1977 return -ENOMEM;
1978 __skb_put(pkt, tail_pad);
1979 }
1980
1981 return tail_pad;
1982 }
1983
1984 /**
1985 * brcmf_sdio_txpkt_prep - packet preparation for transmit
1986 * @bus: brcmf_sdio structure pointer
1987 * @pktq: packet list pointer
1988 * @chan: virtual channel to transmit the packet
1989 *
1990 * Processes to be applied to the packet
1991 * - Align data buffer pointer
1992 * - Align data buffer length
1993 * - Prepare header
1994 * Return: negative value if there is error
1995 */
1996 static int
1997 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
1998 uint chan)
1999 {
2000 u16 head_pad, total_len;
2001 struct sk_buff *pkt_next;
2002 u8 txseq;
2003 int ret;
2004 struct brcmf_sdio_hdrinfo hd_info = {0};
2005
2006 txseq = bus->tx_seq;
2007 total_len = 0;
2008 skb_queue_walk(pktq, pkt_next) {
2009 /* alignment packet inserted in previous
2010 * loop cycle can be skipped as it is
2011 * already properly aligned and does not
2012 * need an sdpcm header.
2013 */
2014 if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2015 continue;
2016
2017 /* align packet data pointer */
2018 ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
2019 if (ret < 0)
2020 return ret;
2021 head_pad = (u16)ret;
2022 if (head_pad)
2023 memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
2024
2025 total_len += pkt_next->len;
2026
2027 hd_info.len = pkt_next->len;
2028 hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
2029 if (bus->txglom && pktq->qlen > 1) {
2030 ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
2031 pkt_next, total_len);
2032 if (ret < 0)
2033 return ret;
2034 hd_info.tail_pad = (u16)ret;
2035 total_len += (u16)ret;
2036 }
2037
2038 hd_info.channel = chan;
2039 hd_info.dat_offset = head_pad + bus->tx_hdrlen;
2040 hd_info.seq_num = txseq++;
2041
2042 /* Now fill the header */
2043 brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
2044
2045 if (BRCMF_BYTES_ON() &&
2046 ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2047 (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2048 brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
2049 "Tx Frame:\n");
2050 else if (BRCMF_HDRS_ON())
2051 brcmf_dbg_hex_dump(true, pkt_next,
2052 head_pad + bus->tx_hdrlen,
2053 "Tx Header:\n");
2054 }
2055 /* Hardware length tag of the first packet should be total
2056 * length of the chain (including padding)
2057 */
2058 if (bus->txglom)
2059 brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
2060 return 0;
2061 }
2062
2063 /**
2064 * brcmf_sdio_txpkt_postp - packet post processing for transmit
2065 * @bus: brcmf_sdio structure pointer
2066 * @pktq: packet list pointer
2067 *
2068 * Processes to be applied to the packet
2069 * - Remove head padding
2070 * - Remove tail padding
2071 */
2072 static void
2073 brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2074 {
2075 u8 *hdr;
2076 u32 dat_offset;
2077 u16 tail_pad;
2078 u32 dummy_flags, chop_len;
2079 struct sk_buff *pkt_next, *tmp, *pkt_prev;
2080
2081 skb_queue_walk_safe(pktq, pkt_next, tmp) {
2082 dummy_flags = *(u32 *)(pkt_next->cb);
2083 if (dummy_flags & ALIGN_SKB_FLAG) {
2084 chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2085 if (chop_len) {
2086 pkt_prev = pkt_next->prev;
2087 skb_put(pkt_prev, chop_len);
2088 }
2089 __skb_unlink(pkt_next, pktq);
2090 brcmu_pkt_buf_free_skb(pkt_next);
2091 } else {
2092 hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
2093 dat_offset = le32_to_cpu(*(__le32 *)hdr);
2094 dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2095 SDPCM_DOFFSET_SHIFT;
2096 skb_pull(pkt_next, dat_offset);
2097 if (bus->txglom) {
2098 tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
2099 skb_trim(pkt_next, pkt_next->len - tail_pad);
2100 }
2101 }
2102 }
2103 }
2104
2105 /* Writes a HW/SW header into the packet and sends it. */
2106 /* Assumes: (a) header space already there, (b) caller holds lock */
2107 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2108 uint chan)
2109 {
2110 int ret;
2111 int i;
2112 struct sk_buff *pkt_next, *tmp;
2113
2114 brcmf_dbg(TRACE, "Enter\n");
2115
2116 ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
2117 if (ret)
2118 goto done;
2119
2120 sdio_claim_host(bus->sdiodev->func[1]);
2121 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2122 SDIO_FUNC_2, F2SYNC, pktq);
2123 bus->sdcnt.f2txdata++;
2124
2125 if (ret < 0) {
2126 /* On failure, abort the command and terminate the frame */
2127 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2128 ret);
2129 bus->sdcnt.tx_sderrs++;
2130
2131 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2132 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2133 SFC_WF_TERM, NULL);
2134 bus->sdcnt.f1regdata++;
2135
2136 for (i = 0; i < 3; i++) {
2137 u8 hi, lo;
2138 hi = brcmf_sdio_regrb(bus->sdiodev,
2139 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2140 lo = brcmf_sdio_regrb(bus->sdiodev,
2141 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2142 bus->sdcnt.f1regdata += 2;
2143 if ((hi == 0) && (lo == 0))
2144 break;
2145 }
2146 }
2147 sdio_release_host(bus->sdiodev->func[1]);
2148
2149 done:
2150 brcmf_sdio_txpkt_postp(bus, pktq);
2151 if (ret == 0)
2152 bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
2153 skb_queue_walk_safe(pktq, pkt_next, tmp) {
2154 __skb_unlink(pkt_next, pktq);
2155 brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
2156 }
2157 return ret;
2158 }
2159
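/*
 * Dequeue up to maxframes packets from the precedence tx queue (honoring
 * flow control and, with tx glomming, the available bus credits), transmit
 * them on the data channel, and re-check device interrupt status when
 * running in polled mode. Returns the number of frames sent.
 */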
2160 static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2161 {
2162 struct sk_buff *pkt;
2163 struct sk_buff_head pktq;
2164 u32 intstatus = 0;
2165 int ret = 0, prec_out, i;
2166 uint cnt = 0;
2167 u8 tx_prec_map, pkt_num;
2168
2169 brcmf_dbg(TRACE, "Enter\n");
2170
2171 tx_prec_map = ~bus->flowcontrol;
2172
2173 /* Send frames until the limit or some other event */
2174 for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2175 pkt_num = 1;
2176 __skb_queue_head_init(&pktq);
2177 if (bus->txglom)
2178 pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2179 brcmf_sdio_txglomsz);
2180 pkt_num = min_t(u32, pkt_num,
2181 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2182 spin_lock_bh(&bus->txqlock);
2183 for (i = 0; i < pkt_num; i++) {
2184 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2185 &prec_out);
2186 if (pkt == NULL)
2187 break;
2188 __skb_queue_tail(&pktq, pkt);
2189 }
2190 spin_unlock_bh(&bus->txqlock);
2191 if (i == 0)
2192 break;
2193
2194 ret = brcmf_sdbrcm_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2195 cnt += i;
2196
2197 /* In poll mode, need to check for other events */
2198 if (!bus->intr && cnt) {
2199 /* Check device status, signal pending interrupt */
2200 sdio_claim_host(bus->sdiodev->func[1]);
2201 ret = r_sdreg32(bus, &intstatus,
2202 offsetof(struct sdpcmd_regs,
2203 intstatus));
2204 sdio_release_host(bus->sdiodev->func[1]);
2205 bus->sdcnt.f2txdata++;
2206 if (ret != 0)
2207 break;
2208 if (intstatus & bus->hostintmask)
2209 atomic_set(&bus->ipend, 1);
2210 }
2211 }
2212
2213 /* Deflow-control stack if needed */
2214 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
2215 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2216 bus->txoff = false;
2217 brcmf_txflowblock(bus->sdiodev->dev, false);
2218 }
2219
2220 return cnt;
2221 }
2222
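/*
 * Bring the bus down: stop the watchdog thread, mask and clear chip
 * interrupts, disable SDIO function 2, drop the backplane clock and flush
 * queued tx packets as well as pending glom and rx control state.
 */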
2223 static void brcmf_sdbrcm_bus_stop(struct device *dev)
2224 {
2225 u32 local_hostintmask;
2226 u8 saveclk;
2227 int err;
2228 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2229 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2230 struct brcmf_sdio *bus = sdiodev->bus;
2231
2232 brcmf_dbg(TRACE, "Enter\n");
2233
2234 if (bus->watchdog_tsk) {
2235 send_sig(SIGTERM, bus->watchdog_tsk, 1);
2236 kthread_stop(bus->watchdog_tsk);
2237 bus->watchdog_tsk = NULL;
2238 }
2239
2240 sdio_claim_host(bus->sdiodev->func[1]);
2241
2242 /* Enable clock for device interrupts */
2243 brcmf_sdbrcm_bus_sleep(bus, false, false);
2244
2245 /* Disable and clear interrupts at the chip level also */
2246 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2247 local_hostintmask = bus->hostintmask;
2248 bus->hostintmask = 0;
2249
2250 /* Change our idea of bus state */
2251 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2252
2253 /* Force clocks on backplane to be sure F2 interrupt propagates */
2254 saveclk = brcmf_sdio_regrb(bus->sdiodev,
2255 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2256 if (!err) {
2257 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2258 (saveclk | SBSDIO_FORCE_HT), &err);
2259 }
2260 if (err)
2261 brcmf_err("Failed to force clock for F2: err %d\n", err);
2262
2263 /* Turn off the bus (F2), free any pending packets */
2264 brcmf_dbg(INTR, "disable SDIO interrupts\n");
2265 sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
2266
2267 /* Clear any pending interrupts now that F2 is disabled */
2268 w_sdreg32(bus, local_hostintmask,
2269 offsetof(struct sdpcmd_regs, intstatus));
2270
2271 /* Turn off the backplane clock (only) */
2272 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
2273 sdio_release_host(bus->sdiodev->func[1]);
2274
2275 /* Clear the data packet queues */
2276 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2277
2278 /* Clear any held glomming stuff */
2279 if (bus->glomd)
2280 brcmu_pkt_buf_free_skb(bus->glomd);
2281 brcmf_sdbrcm_free_glom(bus);
2282
2283 /* Clear rx control and wake any waiters */
2284 spin_lock_bh(&bus->rxctl_lock);
2285 bus->rxlen = 0;
2286 spin_unlock_bh(&bus->rxctl_lock);
2287 brcmf_sdbrcm_dcmd_resp_wake(bus);
2288
2289 /* Reset some F2 state stuff */
2290 bus->rxskip = false;
2291 bus->tx_seq = bus->rx_seq = 0;
2292 }
2293
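/*
 * Re-enable the out-of-band interrupt line, if one is in use and no
 * interrupt is still pending, after the device status has been handled.
 */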
2294 static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2295 {
2296 unsigned long flags;
2297
2298 if (bus->sdiodev->oob_irq_requested) {
2299 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2300 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2301 enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2302 bus->sdiodev->irq_en = true;
2303 }
2304 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2305 }
2306 }
2307
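/*
 * Read the SDIO core intstatus register, acknowledge the bits that were
 * set, and latch them (masked by hostintmask) into bus->intstatus for the
 * DPC; also refreshes the host flow-control state.
 */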
2308 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2309 {
2310 u8 idx;
2311 u32 addr;
2312 unsigned long val;
2313 int n, ret;
2314
2315 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2316 addr = bus->ci->c_inf[idx].base +
2317 offsetof(struct sdpcmd_regs, intstatus);
2318
2319 val = brcmf_sdio_regrl(bus->sdiodev, addr, &ret);
2320 bus->sdcnt.f1regdata++;
2321 if (ret != 0)
2322 val = 0;
2323
2324 val &= bus->hostintmask;
2325 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2326
2327 /* Clear interrupts */
2328 if (val) {
2329 brcmf_sdio_regwl(bus->sdiodev, addr, val, &ret);
2330 bus->sdcnt.f1regdata++;
2331 }
2332
2333 if (ret) {
2334 atomic_set(&bus->intstatus, 0);
2335 } else if (val) {
2336 for_each_set_bit(n, &val, 32)
2337 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2338 }
2339
2340 return ret;
2341 }
2342
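/*
 * Deferred processing context: complete any pending clock transition,
 * collect and acknowledge device interrupt status, read available rx
 * frames, push out a queued control frame and send queued data frames,
 * then reschedule itself if work remains.
 */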
2343 static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2344 {
2345 u32 newstatus = 0;
2346 unsigned long intstatus;
2347 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2348 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2349 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2350 int err = 0, n;
2351
2352 brcmf_dbg(TRACE, "Enter\n");
2353
2354 sdio_claim_host(bus->sdiodev->func[1]);
2355
2356 /* If waiting for HTAVAIL, check status */
2357 if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2358 u8 clkctl, devctl = 0;
2359
2360 #ifdef DEBUG
2361 /* Check for inconsistent device control */
2362 devctl = brcmf_sdio_regrb(bus->sdiodev,
2363 SBSDIO_DEVICE_CTL, &err);
2364 if (err) {
2365 brcmf_err("error reading DEVCTL: %d\n", err);
2366 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2367 }
2368 #endif /* DEBUG */
2369
2370 /* Read CSR, if clock on switch to AVAIL, else ignore */
2371 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2372 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2373 if (err) {
2374 brcmf_err("error reading CSR: %d\n",
2375 err);
2376 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2377 }
2378
2379 brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2380 devctl, clkctl);
2381
2382 if (SBSDIO_HTAV(clkctl)) {
2383 devctl = brcmf_sdio_regrb(bus->sdiodev,
2384 SBSDIO_DEVICE_CTL, &err);
2385 if (err) {
2386 brcmf_err("error reading DEVCTL: %d\n",
2387 err);
2388 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2389 }
2390 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2391 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2392 devctl, &err);
2393 if (err) {
2394 brcmf_err("error writing DEVCTL: %d\n",
2395 err);
2396 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2397 }
2398 bus->clkstate = CLK_AVAIL;
2399 }
2400 }
2401
2402 /* Make sure backplane clock is on */
2403 brcmf_sdbrcm_bus_sleep(bus, false, true);
2404
2405 /* Pending interrupt indicates new device status */
2406 if (atomic_read(&bus->ipend) > 0) {
2407 atomic_set(&bus->ipend, 0);
2408 err = brcmf_sdio_intr_rstatus(bus);
2409 }
2410
2411 /* Start with leftover status bits */
2412 intstatus = atomic_xchg(&bus->intstatus, 0);
2413
2414 /* Handle flow-control change: read new state in case our ack
2415 * crossed another change interrupt. If change still set, assume
2416 * FC ON for safety, let next loop through do the debounce.
2417 */
2418 if (intstatus & I_HMB_FC_CHANGE) {
2419 intstatus &= ~I_HMB_FC_CHANGE;
2420 err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2421 offsetof(struct sdpcmd_regs, intstatus));
2422
2423 err = r_sdreg32(bus, &newstatus,
2424 offsetof(struct sdpcmd_regs, intstatus));
2425 bus->sdcnt.f1regdata += 2;
2426 atomic_set(&bus->fcstate,
2427 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2428 intstatus |= (newstatus & bus->hostintmask);
2429 }
2430
2431 /* Handle host mailbox indication */
2432 if (intstatus & I_HMB_HOST_INT) {
2433 intstatus &= ~I_HMB_HOST_INT;
2434 intstatus |= brcmf_sdbrcm_hostmail(bus);
2435 }
2436
2437 sdio_release_host(bus->sdiodev->func[1]);
2438
2439 /* Generally don't ask for these, can get CRC errors... */
2440 if (intstatus & I_WR_OOSYNC) {
2441 brcmf_err("Dongle reports WR_OOSYNC\n");
2442 intstatus &= ~I_WR_OOSYNC;
2443 }
2444
2445 if (intstatus & I_RD_OOSYNC) {
2446 brcmf_err("Dongle reports RD_OOSYNC\n");
2447 intstatus &= ~I_RD_OOSYNC;
2448 }
2449
2450 if (intstatus & I_SBINT) {
2451 brcmf_err("Dongle reports SBINT\n");
2452 intstatus &= ~I_SBINT;
2453 }
2454
2455 /* Would be active due to wake-wlan in gSPI */
2456 if (intstatus & I_CHIPACTIVE) {
2457 brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2458 intstatus &= ~I_CHIPACTIVE;
2459 }
2460
2461 /* Ignore frame indications if rxskip is set */
2462 if (bus->rxskip)
2463 intstatus &= ~I_HMB_FRAME_IND;
2464
2465 /* On frame indication, read available frames */
2466 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2467 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2468 if (!bus->rxpending)
2469 intstatus &= ~I_HMB_FRAME_IND;
2470 rxlimit -= min(framecnt, rxlimit);
2471 }
2472
2473 /* Keep still-pending events for next scheduling */
2474 if (intstatus) {
2475 for_each_set_bit(n, &intstatus, 32)
2476 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2477 }
2478
2479 brcmf_sdbrcm_clrintr(bus);
2480
2481 if (data_ok(bus) && bus->ctrl_frame_stat &&
2482 (bus->clkstate == CLK_AVAIL)) {
2483 int i;
2484
2485 sdio_claim_host(bus->sdiodev->func[1]);
2486 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2487 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2488 (u32) bus->ctrl_frame_len);
2489
2490 if (err < 0) {
2491 /* On failure, abort the command and
2492 terminate the frame */
2493 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2494 err);
2495 bus->sdcnt.tx_sderrs++;
2496
2497 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2498
2499 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2500 SFC_WF_TERM, &err);
2501 bus->sdcnt.f1regdata++;
2502
2503 for (i = 0; i < 3; i++) {
2504 u8 hi, lo;
2505 hi = brcmf_sdio_regrb(bus->sdiodev,
2506 SBSDIO_FUNC1_WFRAMEBCHI,
2507 &err);
2508 lo = brcmf_sdio_regrb(bus->sdiodev,
2509 SBSDIO_FUNC1_WFRAMEBCLO,
2510 &err);
2511 bus->sdcnt.f1regdata += 2;
2512 if ((hi == 0) && (lo == 0))
2513 break;
2514 }
2515
2516 } else {
2517 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2518 }
2519 sdio_release_host(bus->sdiodev->func[1]);
2520 bus->ctrl_frame_stat = false;
2521 brcmf_sdbrcm_wait_event_wakeup(bus);
2522 }
2523 /* Send queued frames (limit 1 if rx may still be pending) */
2524 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2525 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2526 && data_ok(bus)) {
2527 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2528 txlimit;
2529 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2530 txlimit -= framecnt;
2531 }
2532
2533 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2534 brcmf_err("failed backplane access over SDIO, halting operation\n");
2535 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2536 atomic_set(&bus->intstatus, 0);
2537 } else if (atomic_read(&bus->intstatus) ||
2538 atomic_read(&bus->ipend) > 0 ||
2539 (!atomic_read(&bus->fcstate) &&
2540 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2541 data_ok(bus)) || PKT_AVAILABLE()) {
2542 atomic_inc(&bus->dpc_tskcnt);
2543 }
2544
2545 /* If we're done for now, turn off clock request. */
2546 if ((bus->clkstate != CLK_PENDING)
2547 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2548 bus->activity = false;
2549 brcmf_dbg(SDIO, "idle state\n");
2550 sdio_claim_host(bus->sdiodev->func[1]);
2551 brcmf_sdbrcm_bus_sleep(bus, true, false);
2552 sdio_release_host(bus->sdiodev->func[1]);
2553 }
2554 }
2555
2556 static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
2557 {
2558 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2559 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2560 struct brcmf_sdio *bus = sdiodev->bus;
2561
2562 return &bus->txq;
2563 }
2564
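/*
 * Queue a data frame for transmission: reserve bus header space, enqueue
 * by precedence with flow control, and kick the DPC worker if it is idle.
 */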
2565 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2566 {
2567 int ret = -EBADE;
2568 uint datalen, prec;
2569 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2570 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2571 struct brcmf_sdio *bus = sdiodev->bus;
2572 ulong flags;
2573
2574 brcmf_dbg(TRACE, "Enter\n");
2575
2576 datalen = pkt->len;
2577
2578 /* Add space for the header */
2579 skb_push(pkt, bus->tx_hdrlen);
2580 /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2581
2582 prec = prio2prec((pkt->priority & PRIOMASK));
2583
2584 /* Check for existing queue, current flow-control,
2585 pending event, or pending clock */
2586 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2587 bus->sdcnt.fcqueued++;
2588
2589 /* Priority based enq */
2590 spin_lock_irqsave(&bus->txqlock, flags);
2591 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2592 skb_pull(pkt, bus->tx_hdrlen);
2593 brcmf_err("out of bus->txq !!!\n");
2594 ret = -ENOSR;
2595 } else {
2596 ret = 0;
2597 }
2598
2599 if (pktq_len(&bus->txq) >= TXHI) {
2600 bus->txoff = true;
2601 brcmf_txflowblock(bus->sdiodev->dev, true);
2602 }
2603 spin_unlock_irqrestore(&bus->txqlock, flags);
2604
2605 #ifdef DEBUG
2606 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2607 qcount[prec] = pktq_plen(&bus->txq, prec);
2608 #endif
2609
2610 if (atomic_read(&bus->dpc_tskcnt) == 0) {
2611 atomic_inc(&bus->dpc_tskcnt);
2612 queue_work(bus->brcmf_wq, &bus->datawork);
2613 }
2614
2615 return ret;
2616 }
2617
2618 #ifdef DEBUG
2619 #define CONSOLE_LINE_MAX 192
2620
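/*
 * Read the firmware console ring buffer from device RAM and emit any
 * complete lines through pr_debug(). Partial lines are left in the buffer
 * for the next poll.
 */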
2621 static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2622 {
2623 struct brcmf_console *c = &bus->console;
2624 u8 line[CONSOLE_LINE_MAX], ch;
2625 u32 n, idx, addr;
2626 int rv;
2627
2628 /* Don't do anything until FWREADY updates console address */
2629 if (bus->console_addr == 0)
2630 return 0;
2631
2632 /* Read console log struct */
2633 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2634 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2635 sizeof(c->log_le));
2636 if (rv < 0)
2637 return rv;
2638
2639 /* Allocate console buffer (one time only) */
2640 if (c->buf == NULL) {
2641 c->bufsize = le32_to_cpu(c->log_le.buf_size);
2642 c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2643 if (c->buf == NULL)
2644 return -ENOMEM;
2645 }
2646
2647 idx = le32_to_cpu(c->log_le.idx);
2648
2649 /* Protect against corrupt value */
2650 if (idx > c->bufsize)
2651 return -EBADE;
2652
2653 /* Skip reading the console buffer if the index pointer
2654 has not moved */
2655 if (idx == c->last)
2656 return 0;
2657
2658 /* Read the console buffer */
2659 addr = le32_to_cpu(c->log_le.buf);
2660 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2661 if (rv < 0)
2662 return rv;
2663
2664 while (c->last != idx) {
2665 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2666 if (c->last == idx) {
2667 /* This would output a partial line.
2668 * Instead, back up
2669 * the buffer pointer and output this
2670 * line next time around.
2671 */
2672 if (c->last >= n)
2673 c->last -= n;
2674 else
2675 c->last = c->bufsize - n;
2676 goto break2;
2677 }
2678 ch = c->buf[c->last];
2679 c->last = (c->last + 1) % c->bufsize;
2680 if (ch == '\n')
2681 break;
2682 line[n] = ch;
2683 }
2684
2685 if (n > 0) {
2686 if (line[n - 1] == '\r')
2687 n--;
2688 line[n] = 0;
2689 pr_debug("CONSOLE: %s\n", line);
2690 }
2691 }
2692 break2:
2693
2694 return 0;
2695 }
2696 #endif /* DEBUG */
2697
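/*
 * Send a single pre-built control frame on function 2. On SDIO error the
 * command is aborted and the partial frame terminated via the frame
 * control register before returning the error.
 */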
2698 static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2699 {
2700 int i;
2701 int ret;
2702
2703 bus->ctrl_frame_stat = false;
2704 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2705 SDIO_FUNC_2, F2SYNC, frame, len);
2706
2707 if (ret < 0) {
2708 /* On failure, abort the command and terminate the frame */
2709 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2710 ret);
2711 bus->sdcnt.tx_sderrs++;
2712
2713 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2714
2715 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2716 SFC_WF_TERM, NULL);
2717 bus->sdcnt.f1regdata++;
2718
2719 for (i = 0; i < 3; i++) {
2720 u8 hi, lo;
2721 hi = brcmf_sdio_regrb(bus->sdiodev,
2722 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2723 lo = brcmf_sdio_regrb(bus->sdiodev,
2724 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2725 bus->sdcnt.f1regdata += 2;
2726 if (hi == 0 && lo == 0)
2727 break;
2728 }
2729 return ret;
2730 }
2731
2732 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2733
2734 return ret;
2735 }
2736
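/*
 * Transmit a control/command frame. The caller has already reserved bus
 * header space in front of the message; alignment and block padding are
 * added here. When no tx credit is available the frame is first handed to
 * the DPC and the call waits for it to go out; otherwise (or if that wait
 * times out) the frame is sent directly with retries.
 */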
2737 static int
2738 brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2739 {
2740 u8 *frame;
2741 u16 len, pad;
2742 uint retries = 0;
2743 u8 doff = 0;
2744 int ret = -1;
2745 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2746 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2747 struct brcmf_sdio *bus = sdiodev->bus;
2748 struct brcmf_sdio_hdrinfo hd_info = {0};
2749
2750 brcmf_dbg(TRACE, "Enter\n");
2751
2752 /* Back up the pointer to make room for the bus header */
2753 frame = msg - bus->tx_hdrlen;
2754 len = (msglen += bus->tx_hdrlen);
2755
2756 /* Add alignment padding (optional for ctl frames) */
2757 doff = ((unsigned long)frame % bus->head_align);
2758 if (doff) {
2759 frame -= doff;
2760 len += doff;
2761 msglen += doff;
2762 memset(frame, 0, doff + bus->tx_hdrlen);
2763 }
2764 /* precondition: doff < bus->head_align */
2765 doff += bus->tx_hdrlen;
2766
2767 /* Round send length to next SDIO block */
2768 pad = 0;
2769 if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2770 pad = bus->blocksize - (len % bus->blocksize);
2771 if ((pad > bus->roundup) || (pad >= bus->blocksize))
2772 pad = 0;
2773 } else if (len % bus->head_align) {
2774 pad = bus->head_align - (len % bus->head_align);
2775 }
2776 len += pad;
2777
2778 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2779
2780 /* Make sure backplane clock is on */
2781 sdio_claim_host(bus->sdiodev->func[1]);
2782 brcmf_sdbrcm_bus_sleep(bus, false, false);
2783 sdio_release_host(bus->sdiodev->func[1]);
2784
2785 hd_info.len = (u16)msglen;
2786 hd_info.channel = SDPCM_CONTROL_CHANNEL;
2787 hd_info.dat_offset = doff;
2788 hd_info.seq_num = bus->tx_seq;
2789 hd_info.lastfrm = true;
2790 hd_info.tail_pad = pad;
2791 brcmf_sdio_hdpack(bus, frame, &hd_info);
2792
2793 if (bus->txglom)
2794 brcmf_sdio_update_hwhdr(frame, len);
2795
2796 if (!data_ok(bus)) {
2797 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
2798 bus->tx_max, bus->tx_seq);
2799 bus->ctrl_frame_stat = true;
2800 /* Send from dpc */
2801 bus->ctrl_frame_buf = frame;
2802 bus->ctrl_frame_len = len;
2803
2804 wait_event_interruptible_timeout(bus->ctrl_wait,
2805 !bus->ctrl_frame_stat,
2806 msecs_to_jiffies(2000));
2807
2808 if (!bus->ctrl_frame_stat) {
2809 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2810 ret = 0;
2811 } else {
2812 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2813 ret = -1;
2814 }
2815 }
2816
2817 if (ret == -1) {
2818 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2819 frame, len, "Tx Frame:\n");
2820 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2821 BRCMF_HDRS_ON(),
2822 frame, min_t(u16, len, 16), "TxHdr:\n");
2823
2824 do {
2825 sdio_claim_host(bus->sdiodev->func[1]);
2826 ret = brcmf_tx_frame(bus, frame, len);
2827 sdio_release_host(bus->sdiodev->func[1]);
2828 } while (ret < 0 && retries++ < TXRETRIES);
2829 }
2830
2831 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2832 atomic_read(&bus->dpc_tskcnt) == 0) {
2833 bus->activity = false;
2834 sdio_claim_host(bus->sdiodev->func[1]);
2835 brcmf_dbg(INFO, "idle\n");
2836 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2837 sdio_release_host(bus->sdiodev->func[1]);
2838 }
2839
2840 if (ret)
2841 bus->sdcnt.tx_ctlerrs++;
2842 else
2843 bus->sdcnt.tx_ctlpkts++;
2844
2845 return ret ? -EIO : 0;
2846 }
2847
2848 #ifdef DEBUG
2849 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2850 {
2851 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2852 }
2853
2854 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2855 struct sdpcm_shared *sh)
2856 {
2857 u32 addr;
2858 int rv;
2859 u32 shaddr = 0;
2860 struct sdpcm_shared_le sh_le;
2861 __le32 addr_le;
2862
2863 shaddr = bus->ci->rambase + bus->ramsize - 4;
2864
2865 /*
2866 * Read last word in socram to determine
2867 * address of sdpcm_shared structure
2868 */
2869 sdio_claim_host(bus->sdiodev->func[1]);
2870 brcmf_sdbrcm_bus_sleep(bus, false, false);
2871 rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2872 sdio_release_host(bus->sdiodev->func[1]);
2873 if (rv < 0)
2874 return rv;
2875
2876 addr = le32_to_cpu(addr_le);
2877
2878 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2879
2880 /*
2881 * Check if addr is valid.
2882 * NVRAM length at the end of memory should have been overwritten.
2883 */
2884 if (!brcmf_sdio_valid_shared_address(addr)) {
2885 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2886 addr);
2887 return -EINVAL;
2888 }
2889
2890 /* Read hndrte_shared structure */
2891 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2892 sizeof(struct sdpcm_shared_le));
2893 if (rv < 0)
2894 return rv;
2895
2896 /* Endianness */
2897 sh->flags = le32_to_cpu(sh_le.flags);
2898 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
2899 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
2900 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
2901 sh->assert_line = le32_to_cpu(sh_le.assert_line);
2902 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2903 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2904
2905 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2906 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2907 SDPCM_SHARED_VERSION,
2908 sh->flags & SDPCM_SHARED_VERSION_MASK);
2909 return -EPROTO;
2910 }
2911
2912 return 0;
2913 }
2914
2915 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2916 struct sdpcm_shared *sh, char __user *data,
2917 size_t count)
2918 {
2919 u32 addr, console_ptr, console_size, console_index;
2920 char *conbuf = NULL;
2921 __le32 sh_val;
2922 int rv;
2923 loff_t pos = 0;
2924 int nbytes = 0;
2925
2926 /* obtain console information from device memory */
2927 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2928 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2929 (u8 *)&sh_val, sizeof(u32));
2930 if (rv < 0)
2931 return rv;
2932 console_ptr = le32_to_cpu(sh_val);
2933
2934 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2935 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2936 (u8 *)&sh_val, sizeof(u32));
2937 if (rv < 0)
2938 return rv;
2939 console_size = le32_to_cpu(sh_val);
2940
2941 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2942 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2943 (u8 *)&sh_val, sizeof(u32));
2944 if (rv < 0)
2945 return rv;
2946 console_index = le32_to_cpu(sh_val);
2947
2948 /* allocate buffer for console data */
2949 if (console_size <= CONSOLE_BUFFER_MAX)
2950 conbuf = vzalloc(console_size+1);
2951
2952 if (!conbuf)
2953 return -ENOMEM;
2954
2955 /* obtain the console data from device */
2956 conbuf[console_size] = '\0';
2957 rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
2958 console_size);
2959 if (rv < 0)
2960 goto done;
2961
2962 rv = simple_read_from_buffer(data, count, &pos,
2963 conbuf + console_index,
2964 console_size - console_index);
2965 if (rv < 0)
2966 goto done;
2967
2968 nbytes = rv;
2969 if (console_index > 0) {
2970 pos = 0;
2971 rv = simple_read_from_buffer(data+nbytes, count, &pos,
2972 conbuf, console_index - 1);
2973 if (rv < 0)
2974 goto done;
2975 rv += nbytes;
2976 }
2977 done:
2978 vfree(conbuf);
2979 return rv;
2980 }
2981
2982 static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2983 char __user *data, size_t count)
2984 {
2985 int error, res;
2986 char buf[350];
2987 struct brcmf_trap_info tr;
2988 loff_t pos = 0;
2989
2990 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
2991 brcmf_dbg(INFO, "no trap in firmware\n");
2992 return 0;
2993 }
2994
2995 error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
2996 sizeof(struct brcmf_trap_info));
2997 if (error < 0)
2998 return error;
2999
3000 res = scnprintf(buf, sizeof(buf),
3001 "dongle trap info: type 0x%x @ epc 0x%08x\n"
3002 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3003 " lr 0x%08x pc 0x%08x offset 0x%x\n"
3004 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
3005 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
3006 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3007 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3008 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3009 le32_to_cpu(tr.pc), sh->trap_addr,
3010 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3011 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3012 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3013 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3014
3015 return simple_read_from_buffer(data, count, &pos, buf, res);
3016 }
3017
3018 static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
3019 struct sdpcm_shared *sh, char __user *data,
3020 size_t count)
3021 {
3022 int error = 0;
3023 char buf[200];
3024 char file[80] = "?";
3025 char expr[80] = "<???>";
3026 int res;
3027 loff_t pos = 0;
3028
3029 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3030 brcmf_dbg(INFO, "firmware not built with -assert\n");
3031 return 0;
3032 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3033 brcmf_dbg(INFO, "no assert in dongle\n");
3034 return 0;
3035 }
3036
3037 sdio_claim_host(bus->sdiodev->func[1]);
3038 if (sh->assert_file_addr != 0) {
3039 error = brcmf_sdio_ramrw(bus->sdiodev, false,
3040 sh->assert_file_addr, (u8 *)file, 80);
3041 if (error < 0)
3042 return error;
3043 }
3044 if (sh->assert_exp_addr != 0) {
3045 error = brcmf_sdio_ramrw(bus->sdiodev, false,
3046 sh->assert_exp_addr, (u8 *)expr, 80);
3047 if (error < 0)
3048 return error;
3049 }
3050 sdio_release_host(bus->sdiodev->func[1]);
3051
3052 res = scnprintf(buf, sizeof(buf),
3053 "dongle assert: %s:%d: assert(%s)\n",
3054 file, sh->assert_line, expr);
3055 return simple_read_from_buffer(data, count, &pos, buf, res);
3056 }
3057
3058 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3059 {
3060 int error;
3061 struct sdpcm_shared sh;
3062
3063 error = brcmf_sdio_readshared(bus, &sh);
3064
3065 if (error < 0)
3066 return error;
3067
3068 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3069 brcmf_dbg(INFO, "firmware not built with -assert\n");
3070 else if (sh.flags & SDPCM_SHARED_ASSERT)
3071 brcmf_err("assertion in dongle\n");
3072
3073 if (sh.flags & SDPCM_SHARED_TRAP)
3074 brcmf_err("firmware trap in dongle\n");
3075
3076 return 0;
3077 }
3078
3079 static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
3080 size_t count, loff_t *ppos)
3081 {
3082 int error = 0;
3083 struct sdpcm_shared sh;
3084 int nbytes = 0;
3085 loff_t pos = *ppos;
3086
3087 if (pos != 0)
3088 return 0;
3089
3090 error = brcmf_sdio_readshared(bus, &sh);
3091 if (error < 0)
3092 goto done;
3093
3094 error = brcmf_sdio_assert_info(bus, &sh, data, count);
3095 if (error < 0)
3096 goto done;
3097 nbytes = error;
3098
3099 error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
3100 if (error < 0)
3101 goto done;
3102 nbytes += error;
3103
3104 error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
3105 if (error < 0)
3106 goto done;
3107 nbytes += error;
3108
3109 error = nbytes;
3110 *ppos += nbytes;
3111 done:
3112 return error;
3113 }
3114
3115 static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
3116 size_t count, loff_t *ppos)
3117 {
3118 struct brcmf_sdio *bus = f->private_data;
3119 int res;
3120
3121 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
3122 if (res > 0)
3123 *ppos += res;
3124 return (ssize_t)res;
3125 }
3126
3127 static const struct file_operations brcmf_sdio_forensic_ops = {
3128 .owner = THIS_MODULE,
3129 .open = simple_open,
3130 .read = brcmf_sdio_forensic_read
3131 };
3132
3133 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3134 {
3135 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3136 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3137
3138 if (IS_ERR_OR_NULL(dentry))
3139 return;
3140
3141 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3142 &brcmf_sdio_forensic_ops);
3143 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3144 }
3145 #else
3146 static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3147 {
3148 return 0;
3149 }
3150
3151 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3152 {
3153 }
3154 #endif /* DEBUG */
3155
3156 static int
3157 brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3158 {
3159 int timeleft;
3160 uint rxlen = 0;
3161 bool pending;
3162 u8 *buf;
3163 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3164 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3165 struct brcmf_sdio *bus = sdiodev->bus;
3166
3167 brcmf_dbg(TRACE, "Enter\n");
3168
3169 /* Wait until control frame is available */
3170 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3171
3172 spin_lock_bh(&bus->rxctl_lock);
3173 rxlen = bus->rxlen;
3174 memcpy(msg, bus->rxctl, min(msglen, rxlen));
3175 bus->rxctl = NULL;
3176 buf = bus->rxctl_orig;
3177 bus->rxctl_orig = NULL;
3178 bus->rxlen = 0;
3179 spin_unlock_bh(&bus->rxctl_lock);
3180 vfree(buf);
3181
3182 if (rxlen) {
3183 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3184 rxlen, msglen);
3185 } else if (timeleft == 0) {
3186 brcmf_err("resumed on timeout\n");
3187 brcmf_sdbrcm_checkdied(bus);
3188 } else if (pending) {
3189 brcmf_dbg(CTL, "cancelled\n");
3190 return -ERESTARTSYS;
3191 } else {
3192 brcmf_dbg(CTL, "resumed for unknown reason?\n");
3193 brcmf_sdbrcm_checkdied(bus);
3194 }
3195
3196 if (rxlen)
3197 bus->sdcnt.rx_ctlpkts++;
3198 else
3199 bus->sdcnt.rx_ctlerrs++;
3200
3201 return rxlen ? (int)rxlen : -ETIMEDOUT;
3202 }
3203
3204 static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3205 {
3206 struct chip_info *ci = bus->ci;
3207
3208 /* To enter download state, disable ARM and reset SOCRAM.
3209 * To exit download state, simply reset ARM (default is RAM boot).
3210 */
3211 if (enter) {
3212 bus->alp_only = true;
3213
3214 brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
3215 } else {
3216 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
3217 bus->varsz))
3218 return false;
3219
3220 /* Allow HT Clock now that the ARM is running. */
3221 bus->alp_only = false;
3222
3223 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
3224 }
3225
3226 return true;
3227 }
3228
3229 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3230 {
3231 const struct firmware *fw;
3232 int err;
3233 int offset;
3234 int address;
3235 int len;
3236
3237 fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
3238 if (fw == NULL)
3239 return -ENOENT;
3240
3241 if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
3242 BRCMF_MAX_CORENUM)
3243 memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
3244
3245 err = 0;
3246 offset = 0;
3247 address = bus->ci->rambase;
3248 while (offset < fw->size) {
3249 len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
3250 fw->size - offset;
3251 err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
3252 (u8 *)&fw->data[offset], len);
3253 if (err) {
3254 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3255 err, len, address);
3256 goto failure;
3257 }
3258 offset += len;
3259 address += len;
3260 }
3261
3262 failure:
3263 release_firmware(fw);
3264
3265 return err;
3266 }
3267
3268 /*
3269 * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file
3270 * and ending in a NUL.
3271 * Removes carriage returns, empty lines, comment lines, and converts
3272 * newlines to NULs.
3273 * Shortens buffer as needed and pads with NULs. End of buffer is marked
3274 * by two NULs.
3275 */
3276
3277 static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
3278 const struct firmware *nv)
3279 {
3280 char *varbuf;
3281 char *dp;
3282 bool findNewline;
3283 int column;
3284 int ret = 0;
3285 uint buf_len, n, len;
3286
3287 len = nv->size;
3288 varbuf = vmalloc(len);
3289 if (!varbuf)
3290 return -ENOMEM;
3291
3292 memcpy(varbuf, nv->data, len);
3293 dp = varbuf;
3294
3295 findNewline = false;
3296 column = 0;
3297
3298 for (n = 0; n < len; n++) {
3299 if (varbuf[n] == 0)
3300 break;
3301 if (varbuf[n] == '\r')
3302 continue;
3303 if (findNewline && varbuf[n] != '\n')
3304 continue;
3305 findNewline = false;
3306 if (varbuf[n] == '#') {
3307 findNewline = true;
3308 continue;
3309 }
3310 if (varbuf[n] == '\n') {
3311 if (column == 0)
3312 continue;
3313 *dp++ = 0;
3314 column = 0;
3315 continue;
3316 }
3317 *dp++ = varbuf[n];
3318 column++;
3319 }
3320 buf_len = dp - varbuf;
3321 while (dp < varbuf + n)
3322 *dp++ = 0;
3323
3324 kfree(bus->vars);
3325 /* roundup needed for download to device */
3326 bus->varsz = roundup(buf_len + 1, 4);
3327 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3328 if (bus->vars == NULL) {
3329 bus->varsz = 0;
3330 ret = -ENOMEM;
3331 goto err;
3332 }
3333
3334 /* copy the processed variables and add null termination */
3335 memcpy(bus->vars, varbuf, buf_len);
3336 bus->vars[buf_len] = 0;
3337 err:
3338 vfree(varbuf);
3339 return ret;
3340 }
3341
3342 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3343 {
3344 const struct firmware *nv;
3345 int ret;
3346
3347 nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
3348 if (nv == NULL)
3349 return -ENOENT;
3350
3351 ret = brcmf_process_nvram_vars(bus, nv);
3352
3353 release_firmware(nv);
3354
3355 return ret;
3356 }
3357
3358 static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3359 {
3360 int bcmerror = -1;
3361
3362 /* Keep arm in reset */
3363 if (!brcmf_sdbrcm_download_state(bus, true)) {
3364 brcmf_err("error placing ARM core in reset\n");
3365 goto err;
3366 }
3367
3368 if (brcmf_sdbrcm_download_code_file(bus)) {
3369 brcmf_err("dongle image file download failed\n");
3370 goto err;
3371 }
3372
3373 if (brcmf_sdbrcm_download_nvram(bus)) {
3374 brcmf_err("dongle nvram file download failed\n");
3375 goto err;
3376 }
3377
3378 /* Take arm out of reset */
3379 if (!brcmf_sdbrcm_download_state(bus, false)) {
3380 brcmf_err("error getting out of ARM core reset\n");
3381 goto err;
3382 }
3383
3384 bcmerror = 0;
3385
3386 err:
3387 return bcmerror;
3388 }
3389
3390 static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
3391 {
3392 u32 addr, reg;
3393
3394 brcmf_dbg(TRACE, "Enter\n");
3395
3396 /* old chips with PMU version less than 17 don't support save restore */
3397 if (bus->ci->pmurev < 17)
3398 return false;
3399
3400 /* read PMU chipcontrol register 3 */
3401 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3402 brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
3403 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3404 reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
3405
3406 return (bool)reg;
3407 }
3408
3409 static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
3410 {
3411 int err = 0;
3412 u8 val;
3413
3414 brcmf_dbg(TRACE, "Enter\n");
3415
3416 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3417 &err);
3418 if (err) {
3419 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3420 return;
3421 }
3422
3423 val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3424 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3425 val, &err);
3426 if (err) {
3427 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3428 return;
3429 }
3430
3431 /* Add CMD14 Support */
3432 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3433 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3434 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3435 &err);
3436 if (err) {
3437 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3438 return;
3439 }
3440
3441 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3442 SBSDIO_FORCE_HT, &err);
3443 if (err) {
3444 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3445 return;
3446 }
3447
3448 /* set flag */
3449 bus->sr_enabled = true;
3450 brcmf_dbg(INFO, "SR enabled\n");
3451 }
3452
3453 /* enable KSO bit */
3454 static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
3455 {
3456 u8 val;
3457 int err = 0;
3458
3459 brcmf_dbg(TRACE, "Enter\n");
3460
3461 /* KSO bit added in SDIO core rev 12 */
3462 if (bus->ci->c_inf[1].rev < 12)
3463 return 0;
3464
3465 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3466 &err);
3467 if (err) {
3468 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3469 return err;
3470 }
3471
3472 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3473 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3474 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3475 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3476 val, &err);
3477 if (err) {
3478 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3479 return err;
3480 }
3481 }
3482
3483 return 0;
3484 }
3485
3486
3487 static bool
3488 brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3489 {
3490 bool ret;
3491
3492 sdio_claim_host(bus->sdiodev->func[1]);
3493
3494 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3495
3496 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3497
3498 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3499
3500 sdio_release_host(bus->sdiodev->func[1]);
3501
3502 return ret;
3503 }
3504
3505 static int brcmf_sdbrcm_bus_preinit(struct device *dev)
3506 {
3507 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3508 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3509 struct brcmf_sdio *bus = sdiodev->bus;
3510 uint pad_size;
3511 u32 value;
3512 u8 idx;
3513 int err;
3514
3515 /* the commands below use the terms tx and rx from
3516 * a device perspective, i.e. bus:txglom affects the
3517 * bus transfers from device to host.
3518 */
3519 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
3520 if (bus->ci->c_inf[idx].rev < 12) {
3521 /* for sdio core rev < 12, disable txgloming */
3522 value = 0;
3523 err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
3524 sizeof(u32));
3525 } else {
3526 /* otherwise, set txglomalign */
3527 value = 4;
3528 if (sdiodev->pdata)
3529 value = sdiodev->pdata->sd_sgentry_align;
3530 /* SDIO ADMA requires at least 32 bit alignment */
3531 value = max_t(u32, value, 4);
3532 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
3533 sizeof(u32));
3534 }
3535
3536 if (err < 0)
3537 goto done;
3538
3539 bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
3540 if (sdiodev->sg_support) {
3541 bus->txglom = false;
3542 value = 1;
3543 pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
3544 bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
3545 if (!bus->txglom_sgpad)
3546 brcmf_err("allocating txglom padding skb failed, reduced performance\n");
3547
3548 err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
3549 &value, sizeof(u32));
3550 if (err < 0) {
3551 /* bus:rxglom is allowed to fail */
3552 err = 0;
3553 } else {
3554 bus->txglom = true;
3555 bus->tx_hdrlen += SDPCM_HWEXT_LEN;
3556 }
3557 }
3558 brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
3559
3560 done:
3561 return err;
3562 }
3563
3564 static int brcmf_sdbrcm_bus_init(struct device *dev)
3565 {
3566 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3567 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3568 struct brcmf_sdio *bus = sdiodev->bus;
3569 int err, ret = 0;
3570 u8 saveclk;
3571
3572 brcmf_dbg(TRACE, "Enter\n");
3573
3574 /* try to download image and nvram to the dongle */
3575 if (bus_if->state == BRCMF_BUS_DOWN) {
3576 if (!(brcmf_sdbrcm_download_firmware(bus)))
3577 return -1;
3578 }
3579
3580 if (!bus->sdiodev->bus_if->drvr)
3581 return 0;
3582
3583 /* Start the watchdog timer */
3584 bus->sdcnt.tickcnt = 0;
3585 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3586
3587 sdio_claim_host(bus->sdiodev->func[1]);
3588
3589 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3590 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3591 if (bus->clkstate != CLK_AVAIL)
3592 goto exit;
3593
3594 /* Force clocks on backplane to be sure F2 interrupt propagates */
3595 saveclk = brcmf_sdio_regrb(bus->sdiodev,
3596 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3597 if (!err) {
3598 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3599 (saveclk | SBSDIO_FORCE_HT), &err);
3600 }
3601 if (err) {
3602 brcmf_err("Failed to force clock for F2: err %d\n", err);
3603 goto exit;
3604 }
3605
3606 /* Enable function 2 (frame transfers) */
3607 w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
3608 offsetof(struct sdpcmd_regs, tosbmailboxdata));
3609 err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3610
3611
3612 brcmf_dbg(INFO, "enable F2: err=%d\n", err);
3613
3614 /* If F2 successfully enabled, set core and enable interrupts */
3615 if (!err) {
3616 /* Set up the interrupt mask and enable interrupts */
3617 bus->hostintmask = HOSTINTMASK;
3618 w_sdreg32(bus, bus->hostintmask,
3619 offsetof(struct sdpcmd_regs, hostintmask));
3620
3621 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
3622 } else {
3623 /* Disable F2 again */
3624 sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3625 ret = -ENODEV;
3626 }
3627
3628 if (brcmf_sdbrcm_sr_capable(bus)) {
3629 brcmf_sdbrcm_sr_init(bus);
3630 } else {
3631 /* Restore previous clock setting */
3632 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3633 saveclk, &err);
3634 }
3635
3636 if (ret == 0) {
3637 ret = brcmf_sdio_intr_register(bus->sdiodev);
3638 if (ret != 0)
3639 brcmf_err("intr register failed:%d\n", ret);
3640 }
3641
3642 /* If we didn't come up, turn off backplane clock */
3643 if (bus_if->state != BRCMF_BUS_DATA)
3644 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3645
3646 exit:
3647 sdio_release_host(bus->sdiodev->func[1]);
3648
3649 return ret;
3650 }
3651
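/*
 * SDIO interrupt handler: count the interrupt, either defer status
 * collection to the DPC (hard-irq context) or read it immediately, and
 * schedule the DPC worker.
 */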
3652 void brcmf_sdbrcm_isr(void *arg)
3653 {
3654 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3655
3656 brcmf_dbg(TRACE, "Enter\n");
3657
3658 if (!bus) {
3659 brcmf_err("bus is null pointer, exiting\n");
3660 return;
3661 }
3662
3663 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3664 brcmf_err("bus is down. we have nothing to do\n");
3665 return;
3666 }
3667 /* Count the interrupt call */
3668 bus->sdcnt.intrcount++;
3669 if (in_interrupt())
3670 atomic_set(&bus->ipend, 1);
3671 else
3672 if (brcmf_sdio_intr_rstatus(bus)) {
3673 brcmf_err("failed backplane access\n");
3674 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3675 }
3676
3677 /* Disable additional interrupts (is this needed now)? */
3678 if (!bus->intr)
3679 brcmf_err("isr w/o interrupt configured!\n");
3680
3681 atomic_inc(&bus->dpc_tskcnt);
3682 queue_work(bus->brcmf_wq, &bus->datawork);
3683 }
3684
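/*
 * Watchdog tick: poll the device for pending interrupts when polling is
 * enabled, periodically drain the firmware console (DEBUG builds) and turn
 * off the backplane clock after the configured idle time. Returns true if
 * an interrupt is still pending.
 */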
3685 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3686 {
3687 #ifdef DEBUG
3688 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3689 #endif /* DEBUG */
3690
3691 brcmf_dbg(TIMER, "Enter\n");
3692
3693 /* Poll period: check device if appropriate. */
3694 if (!bus->sr_enabled &&
3695 bus->poll && (++bus->polltick >= bus->pollrate)) {
3696 u32 intstatus = 0;
3697
3698 /* Reset poll tick */
3699 bus->polltick = 0;
3700
3701 /* Check device if no interrupts */
3702 if (!bus->intr ||
3703 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3704
3705 if (atomic_read(&bus->dpc_tskcnt) == 0) {
3706 u8 devpend;
3707
3708 sdio_claim_host(bus->sdiodev->func[1]);
3709 devpend = brcmf_sdio_regrb(bus->sdiodev,
3710 SDIO_CCCR_INTx,
3711 NULL);
3712 sdio_release_host(bus->sdiodev->func[1]);
3713 intstatus =
3714 devpend & (INTR_STATUS_FUNC1 |
3715 INTR_STATUS_FUNC2);
3716 }
3717
3718 /* If there is something, make like the ISR and
3719 schedule the DPC */
3720 if (intstatus) {
3721 bus->sdcnt.pollcnt++;
3722 atomic_set(&bus->ipend, 1);
3723
3724 atomic_inc(&bus->dpc_tskcnt);
3725 queue_work(bus->brcmf_wq, &bus->datawork);
3726 }
3727 }
3728
3729 /* Update interrupt tracking */
3730 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3731 }
3732 #ifdef DEBUG
3733 /* Poll for console output periodically */
3734 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3735 bus->console_interval != 0) {
3736 bus->console.count += BRCMF_WD_POLL_MS;
3737 if (bus->console.count >= bus->console_interval) {
3738 bus->console.count -= bus->console_interval;
3739 sdio_claim_host(bus->sdiodev->func[1]);
3740 /* Make sure backplane clock is on */
3741 brcmf_sdbrcm_bus_sleep(bus, false, false);
3742 if (brcmf_sdbrcm_readconsole(bus) < 0)
3743 /* stop on error */
3744 bus->console_interval = 0;
3745 sdio_release_host(bus->sdiodev->func[1]);
3746 }
3747 }
3748 #endif /* DEBUG */
3749
3750 /* On idle timeout clear activity flag and/or turn off clock */
3751 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
3752 if (++bus->idlecount >= bus->idletime) {
3753 bus->idlecount = 0;
3754 if (bus->activity) {
3755 bus->activity = false;
3756 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3757 } else {
3758 brcmf_dbg(SDIO, "idle\n");
3759 sdio_claim_host(bus->sdiodev->func[1]);
3760 brcmf_sdbrcm_bus_sleep(bus, true, false);
3761 sdio_release_host(bus->sdiodev->func[1]);
3762 }
3763 }
3764 }
3765
3766 return (atomic_read(&bus->ipend) > 0);
3767 }
3768
3769 static void brcmf_sdio_dataworker(struct work_struct *work)
3770 {
3771 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3772 datawork);
3773
3774 while (atomic_read(&bus->dpc_tskcnt)) {
3775 brcmf_sdbrcm_dpc(bus);
3776 atomic_dec(&bus->dpc_tskcnt);
3777 }
3778 }
3779
3780 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3781 {
3782 brcmf_dbg(TRACE, "Enter\n");
3783
3784 kfree(bus->rxbuf);
3785 bus->rxctl = bus->rxbuf = NULL;
3786 bus->rxlen = 0;
3787 }
3788
3789 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3790 {
3791 brcmf_dbg(TRACE, "Enter\n");
3792
3793 if (bus->sdiodev->bus_if->maxctl) {
3794 bus->rxblen =
3795 roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
3796 ALIGNMENT) + bus->head_align;
3797 bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
3798 if (!(bus->rxbuf))
3799 return false;
3800 }
3801
3802 return true;
3803 }
3804
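/* Early hardware bring-up during probe: force the PLL off, attach the chip
 * and enable KSO, program the SDIO drive strength, and configure card and
 * PMU control so an SDIO reset also resets the WLAN backplane. Only the ALP
 * clock is requested during this phase (bus->alp_only is set).
 */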
3805 static bool
3806 brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus)
3807 {
3808 u8 clkctl = 0;
3809 int err = 0;
3810 int reg_addr;
3811 u32 reg_val;
3812 u32 drivestrength;
3813
3814 bus->alp_only = true;
3815
3816 sdio_claim_host(bus->sdiodev->func[1]);
3817
3818 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3819 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3820
3821 /*
3822 * Force PLL off until brcmf_sdio_chip_attach()
3823 * programs PLL control regs
3824 */
3825
3826 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3827 BRCMF_INIT_CLKCTL1, &err);
3828 if (!err)
3829 clkctl = brcmf_sdio_regrb(bus->sdiodev,
3830 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3831
3832 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3833 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3834 err, BRCMF_INIT_CLKCTL1, clkctl);
3835 goto fail;
3836 }
3837
3838 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
3839 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3840 goto fail;
3841 }
3842
3843 if (brcmf_sdbrcm_kso_init(bus)) {
3844 brcmf_err("error enabling KSO\n");
3845 goto fail;
3846 }
3847
3848 if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3849 drivestrength = bus->sdiodev->pdata->drive_strength;
3850 else
3851 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3852 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3853
3854 /* Get info on the SOCRAM cores... */
3855 bus->ramsize = bus->ci->ramsize;
3856 if (!(bus->ramsize)) {
3857 brcmf_err("failed to find SOCRAM memory!\n");
3858 goto fail;
3859 }
3860
3861 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3862 reg_val = brcmf_sdio_regrb(bus->sdiodev,
3863 SDIO_CCCR_BRCM_CARDCTRL, &err);
3864 if (err)
3865 goto fail;
3866
3867 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3868
3869 brcmf_sdio_regwb(bus->sdiodev,
3870 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3871 if (err)
3872 goto fail;
3873
3874 /* set PMUControl so a backplane reset does PMU state reload */
3875 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
3876 pmucontrol);
3877 reg_val = brcmf_sdio_regrl(bus->sdiodev,
3878 reg_addr,
3879 &err);
3880 if (err)
3881 goto fail;
3882
3883 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3884
3885 brcmf_sdio_regwl(bus->sdiodev,
3886 reg_addr,
3887 reg_val,
3888 &err);
3889 if (err)
3890 goto fail;
3891
3892
3893 sdio_release_host(bus->sdiodev->func[1]);
3894
3895 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3896
3897 /* allocate header buffer */
3898 bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
3899 if (!bus->hdrbuf)
3900 return false;
3901 /* Locate an appropriately-aligned portion of hdrbuf */
3902 bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3903 bus->head_align);
3904
3905 /* Set the poll and/or interrupt flags */
3906 bus->intr = true;
3907 bus->poll = false;
3908 if (bus->poll)
3909 bus->pollrate = 1;
3910
3911 return true;
3912
3913 fail:
3914 sdio_release_host(bus->sdiodev->func[1]);
3915 return false;
3916 }
3917
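/* Final probe-time initialisation: disable F2 to clear any stale frame
 * state on the dongle, drop the backplane clock, and set up default clock,
 * idle and block-size parameters.
 */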
3918 static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3919 {
3920 brcmf_dbg(TRACE, "Enter\n");
3921
3922 sdio_claim_host(bus->sdiodev->func[1]);
3923
3924 /* Disable F2 to clear any intermediate frame state on the dongle */
3925 sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
3926
3927 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3928 bus->rxflow = false;
3929
3930 /* Done with backplane-dependent accesses, can drop clock... */
3931 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
3932
3933 sdio_release_host(bus->sdiodev->func[1]);
3934
3935 /* ...and initialize clock/power states */
3936 bus->clkstate = CLK_SDONLY;
3937 bus->idletime = BRCMF_IDLE_INTERVAL;
3938 bus->idleclock = BRCMF_IDLE_ACTIVE;
3939
3940 /* Query the F2 block size, set roundup accordingly */
3941 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
3942 bus->roundup = min(max_roundup, bus->blocksize);
3943
3944 /* SR state */
3945 bus->sleeping = false;
3946 bus->sr_enabled = false;
3947
3948 return true;
3949 }
3950
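/* Watchdog kthread: sleeps on watchdog_wait and runs the periodic watchdog
 * processing each time the timer callback signals completion.
 */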
3951 static int
3952 brcmf_sdbrcm_watchdog_thread(void *data)
3953 {
3954 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3955
3956 allow_signal(SIGTERM);
3957 /* Run until signal received */
3958 while (1) {
3959 if (kthread_should_stop())
3960 break;
3961 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3962 brcmf_sdbrcm_bus_watchdog(bus);
3963 /* Count the tick for reference */
3964 bus->sdcnt.tickcnt++;
3965 } else
3966 break;
3967 }
3968 return 0;
3969 }
3970
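/* Watchdog timer callback: wakes the watchdog thread and re-arms the timer
 * for the next BRCMF_WD_POLL_MS interval while it remains valid.
 */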
3971 static void
3972 brcmf_sdbrcm_watchdog(unsigned long data)
3973 {
3974 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3975
3976 if (bus->watchdog_tsk) {
3977 complete(&bus->watchdog_wait);
3978 /* Reschedule the watchdog */
3979 if (bus->wd_timer_valid)
3980 mod_timer(&bus->timer,
3981 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
3982 }
3983 }
3984
3985 static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3986 {
3987 brcmf_dbg(TRACE, "Enter\n");
3988
3989 if (bus->ci) {
3990 sdio_claim_host(bus->sdiodev->func[1]);
3991 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3992 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3993 sdio_release_host(bus->sdiodev->func[1]);
3994 brcmf_sdio_chip_detach(&bus->ci);
3995 if (bus->vars && bus->varsz)
3996 kfree(bus->vars);
3997 bus->vars = NULL;
3998 }
3999
4000 brcmf_dbg(TRACE, "Disconnected\n");
4001 }
4002
4003 /* Detach and free everything */
4004 static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
4005 {
4006 brcmf_dbg(TRACE, "Enter\n");
4007
4008 if (bus) {
4009 /* De-register interrupt handler */
4010 brcmf_sdio_intr_unregister(bus->sdiodev);
4011
4012 cancel_work_sync(&bus->datawork);
4013 if (bus->brcmf_wq)
4014 destroy_workqueue(bus->brcmf_wq);
4015
4016 if (bus->sdiodev->bus_if->drvr) {
4017 brcmf_detach(bus->sdiodev->dev);
4018 brcmf_sdbrcm_release_dongle(bus);
4019 }
4020
4021 brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
4022 brcmf_sdbrcm_release_malloc(bus);
4023 kfree(bus->hdrbuf);
4024 kfree(bus);
4025 }
4026
4027 brcmf_dbg(TRACE, "Disconnected\n");
4028 }
4029
4030 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
4031 .stop = brcmf_sdbrcm_bus_stop,
4032 .preinit = brcmf_sdbrcm_bus_preinit,
4033 .init = brcmf_sdbrcm_bus_init,
4034 .txdata = brcmf_sdbrcm_bus_txdata,
4035 .txctl = brcmf_sdbrcm_bus_txctl,
4036 .rxctl = brcmf_sdbrcm_bus_rxctl,
4037 .gettxq = brcmf_sdbrcm_bus_gettxq,
4038 };
4039
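/* Bus probe entry point: allocates the brcmf_sdio state, creates the data
 * workqueue, attaches to the dongle, starts the watchdog timer and thread,
 * registers the bus ops with the common layer, allocates buffers and brings
 * the bus up. Returns the bus handle or NULL on failure.
 */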
4040 void *brcmf_sdbrcm_probe(struct brcmf_sdio_dev *sdiodev)
4041 {
4042 int ret;
4043 struct brcmf_sdio *bus;
4044
4045 brcmf_dbg(TRACE, "Enter\n");
4046
4047 /* Allocate private bus interface state */
4048 bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
4049 if (!bus)
4050 goto fail;
4051
4052 bus->sdiodev = sdiodev;
4053 sdiodev->bus = bus;
4054 skb_queue_head_init(&bus->glom);
4055 bus->txbound = BRCMF_TXBOUND;
4056 bus->rxbound = BRCMF_RXBOUND;
4057 bus->txminmax = BRCMF_TXMINMAX;
4058 bus->tx_seq = SDPCM_SEQ_WRAP - 1;
4059
4060 /* platform specific configuration:
4061 * alignments must be at least 4 bytes for ADMA
4062 */
4063 bus->head_align = ALIGNMENT;
4064 bus->sgentry_align = ALIGNMENT;
4065 if (sdiodev->pdata) {
4066 if (sdiodev->pdata->sd_head_align > ALIGNMENT)
4067 bus->head_align = sdiodev->pdata->sd_head_align;
4068 if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
4069 bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
4070 }
4071
4072 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
4073 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
4074 if (bus->brcmf_wq == NULL) {
4075 brcmf_err("insufficient memory to create txworkqueue\n");
4076 goto fail;
4077 }
4078
4079 /* attempt to attach to the dongle */
4080 if (!(brcmf_sdbrcm_probe_attach(bus))) {
4081 brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
4082 goto fail;
4083 }
4084
4085 spin_lock_init(&bus->rxctl_lock);
4086 spin_lock_init(&bus->txqlock);
4087 init_waitqueue_head(&bus->ctrl_wait);
4088 init_waitqueue_head(&bus->dcmd_resp_wait);
4089
4090 /* Set up the watchdog timer */
4091 init_timer(&bus->timer);
4092 bus->timer.data = (unsigned long)bus;
4093 bus->timer.function = brcmf_sdbrcm_watchdog;
4094
4095 /* Initialize watchdog thread */
4096 init_completion(&bus->watchdog_wait);
4097 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
4098 bus, "brcmf_watchdog");
4099 if (IS_ERR(bus->watchdog_tsk)) {
4100 pr_warn("brcmf_watchdog thread failed to start\n");
4101 bus->watchdog_tsk = NULL;
4102 }
4103 /* Initialize DPC thread */
4104 atomic_set(&bus->dpc_tskcnt, 0);
4105
4106 /* Assign bus interface call back */
4107 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
4108 bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
4109 bus->sdiodev->bus_if->chip = bus->ci->chip;
4110 bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
4111
4112 /* default sdio bus header length for tx packet */
4113 bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
4114
4115 /* Attach to the common layer, reserve hdr space */
4116 ret = brcmf_attach(bus->sdiodev->dev);
4117 if (ret != 0) {
4118 brcmf_err("brcmf_attach failed\n");
4119 goto fail;
4120 }
4121
4122 /* Allocate buffers */
4123 if (!(brcmf_sdbrcm_probe_malloc(bus))) {
4124 brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
4125 goto fail;
4126 }
4127
4128 if (!(brcmf_sdbrcm_probe_init(bus))) {
4129 brcmf_err("brcmf_sdbrcm_probe_init failed\n");
4130 goto fail;
4131 }
4132
4133 brcmf_sdio_debugfs_create(bus);
4134 brcmf_dbg(INFO, "completed!!\n");
4135
4136 /* if firmware path present try to download and bring up bus */
4137 ret = brcmf_bus_start(bus->sdiodev->dev);
4138 if (ret != 0) {
4139 brcmf_err("dongle is not responding\n");
4140 goto fail;
4141 }
4142
4143 return bus;
4144
4145 fail:
4146 brcmf_sdbrcm_release(bus);
4147 return NULL;
4148 }
4149
4150 void brcmf_sdbrcm_disconnect(void *ptr)
4151 {
4152 struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
4153
4154 brcmf_dbg(TRACE, "Enter\n");
4155
4156 if (bus)
4157 brcmf_sdbrcm_release(bus);
4158
4159 brcmf_dbg(TRACE, "Disconnected\n");
4160 }
4161
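/* Start, re-arm or stop the watchdog timer: a zero wdtick stops the timer,
 * otherwise it is (re)armed at the BRCMF_WD_POLL_MS interval. The timer is
 * not started while the bus is still down (firmware not yet loaded).
 */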
4162 void
4163 brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4164 {
4165 /* Totally stop the timer */
4166 if (!wdtick && bus->wd_timer_valid) {
4167 del_timer_sync(&bus->timer);
4168 bus->wd_timer_valid = false;
4169 bus->save_ms = wdtick;
4170 return;
4171 }
4172
4173 /* don't start the wd until fw is loaded */
4174 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
4175 return;
4176
4177 if (wdtick) {
4178 if (bus->save_ms != BRCMF_WD_POLL_MS) {
4179 if (bus->wd_timer_valid)
4180 /* Stop timer and restart at new value */
4181 del_timer_sync(&bus->timer);
4182
4183 /* Create the timer again when the watchdog
4184 period is changed dynamically, or arm it for
4185 the first time */
4186 bus->timer.expires =
4187 jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
4188 add_timer(&bus->timer);
4189
4190 } else {
4191 /* Re arm the timer, at last watchdog period */
4192 mod_timer(&bus->timer,
4193 jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
4194 }
4195
4196 bus->wd_timer_valid = true;
4197 bus->save_ms = wdtick;
4198 }
4199 }