brcmfmac: introduce brcmf_net_detach() function
[deliverable/linux.git] drivers/net/wireless/brcm80211/brcmfmac/pcie.c
1/* Copyright (c) 2014 Broadcom Corporation
2 *
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/firmware.h>
19#include <linux/pci.h>
20#include <linux/vmalloc.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/bcma/bcma.h>
24#include <linux/sched.h>
 25#include <asm/unaligned.h>
26
27#include <soc.h>
28#include <chipcommon.h>
29#include <brcmu_utils.h>
30#include <brcmu_wifi.h>
31#include <brcm_hw_ids.h>
32
 33#include "debug.h"
 34#include "bus.h"
35#include "commonring.h"
36#include "msgbuf.h"
37#include "pcie.h"
38#include "firmware.h"
39#include "chip.h"
40
41
42enum brcmf_pcie_state {
43 BRCMFMAC_PCIE_STATE_DOWN,
44 BRCMFMAC_PCIE_STATE_UP
45};
46
47
48#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
49#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
50#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
51#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
52#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
53#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
54#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
55#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
56
57#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
58
59#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
60#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
61
 62/* backplane address space accessed by BAR0 */
63#define BRCMF_PCIE_BAR0_WINDOW 0x80
64#define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
65#define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
66
67#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
68#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
69
70#define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
71#define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
72
73#define BRCMF_PCIE_REG_INTSTATUS 0x90
74#define BRCMF_PCIE_REG_INTMASK 0x94
75#define BRCMF_PCIE_REG_SBMBX 0x98
76
77#define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
78#define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
79#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
80#define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
81#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
82#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
83
84#define BRCMF_PCIE_GENREV1 1
85#define BRCMF_PCIE_GENREV2 2
86
87#define BRCMF_PCIE2_INTA 0x01
88#define BRCMF_PCIE2_INTB 0x02
89
90#define BRCMF_PCIE_INT_0 0x01
91#define BRCMF_PCIE_INT_1 0x02
92#define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
93 BRCMF_PCIE_INT_1)
94
95#define BRCMF_PCIE_MB_INT_FN0_0 0x0100
96#define BRCMF_PCIE_MB_INT_FN0_1 0x0200
97#define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
98#define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
99#define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
100#define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
101#define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
102#define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
103#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
104#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
105
106#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
107 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
108 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
109 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
110 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
111 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
112 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
113 BRCMF_PCIE_MB_INT_D2H3_DB1)
114
 115#define BRCMF_PCIE_MIN_SHARED_VERSION 5
116#define BRCMF_PCIE_MAX_SHARED_VERSION 5
117#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
118#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
119#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
120
121#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
122#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
123
124#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
125#define BRCMF_SHARED_RING_BASE_OFFSET 52
126#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
127#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
128#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
129#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
130#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
131#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
132#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
133#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
134#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
135
136#define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
137#define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
138#define BRCMF_RING_H2D_RING_MEM_OFFSET 4
139#define BRCMF_RING_H2D_RING_STATE_OFFSET 8
140
141#define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
142#define BRCMF_RING_MAX_ITEM_OFFSET 4
143#define BRCMF_RING_LEN_ITEMS_OFFSET 6
144#define BRCMF_RING_MEM_SZ 16
145#define BRCMF_RING_STATE_SZ 8
146
147#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
148#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
149#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
150#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
151#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
152#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
153#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
154#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
155#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
156#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
157
158#define BRCMF_DEF_MAX_RXBUFPOST 255
159
160#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
161#define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
162#define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
163
164#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
165#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
166
167#define BRCMF_D2H_DEV_D3_ACK 0x00000001
168#define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
169#define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
170
171#define BRCMF_H2D_HOST_D3_INFORM 0x00000001
172#define BRCMF_H2D_HOST_DS_ACK 0x00000002
173#define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
174#define BRCMF_H2D_HOST_D0_INFORM 0x00000010
175
176#define BRCMF_PCIE_MBDATA_TIMEOUT 2000
177
178#define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
179#define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
180#define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
181#define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
182#define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
183#define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
184#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
185#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
186#define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
187#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
188#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
189#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
190#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
191
192
193MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
194MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
195MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
196MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
197MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
198MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
199MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
200MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
201
202
203struct brcmf_pcie_console {
204 u32 base_addr;
205 u32 buf_addr;
206 u32 bufsize;
207 u32 read_idx;
208 u8 log_str[256];
209 u8 log_idx;
210};
211
212struct brcmf_pcie_shared_info {
213 u32 tcm_base_address;
214 u32 flags;
215 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
216 struct brcmf_pcie_ringbuf *flowrings;
217 u16 max_rxbufpost;
218 u32 nrof_flowrings;
219 u32 rx_dataoffset;
220 u32 htod_mb_data_addr;
221 u32 dtoh_mb_data_addr;
222 u32 ring_info_addr;
223 struct brcmf_pcie_console console;
224 void *scratch;
225 dma_addr_t scratch_dmahandle;
226 void *ringupd;
227 dma_addr_t ringupd_dmahandle;
228};
229
230struct brcmf_pcie_core_info {
231 u32 base;
232 u32 wrapbase;
233};
234
235struct brcmf_pciedev_info {
236 enum brcmf_pcie_state state;
237 bool in_irq;
238 bool irq_requested;
239 struct pci_dev *pdev;
240 char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
241 char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
242 void __iomem *regs;
243 void __iomem *tcm;
244 u32 tcm_size;
245 u32 ram_base;
246 u32 ram_size;
247 struct brcmf_chip *ci;
248 u32 coreid;
249 u32 generic_corerev;
250 struct brcmf_pcie_shared_info shared;
251 void (*ringbell)(struct brcmf_pciedev_info *devinfo);
252 wait_queue_head_t mbdata_resp_wait;
253 bool mbdata_completed;
254 bool irq_allocated;
 255 bool wowl_enabled;
256 u8 dma_idx_sz;
257 void *idxbuf;
258 u32 idxbuf_sz;
259 dma_addr_t idxbuf_dmahandle;
260 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
261 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
262 u16 value);
263};
264
265struct brcmf_pcie_ringbuf {
266 struct brcmf_commonring commonring;
267 dma_addr_t dma_handle;
268 u32 w_idx_addr;
269 u32 r_idx_addr;
270 struct brcmf_pciedev_info *devinfo;
271 u8 id;
272};
273
274
275static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
276 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
277 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
278 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
279 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
280 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
281};
282
283static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
284 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
285 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
286 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
287 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
288 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
289};
290
291
292static u32
293brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
294{
295 void __iomem *address = devinfo->regs + reg_offset;
296
297 return (ioread32(address));
298}
299
300
301static void
302brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
303 u32 value)
304{
305 void __iomem *address = devinfo->regs + reg_offset;
306
307 iowrite32(value, address);
308}
309
310
311static u8
312brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
313{
314 void __iomem *address = devinfo->tcm + mem_offset;
315
316 return (ioread8(address));
317}
318
319
320static u16
321brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
322{
323 void __iomem *address = devinfo->tcm + mem_offset;
324
325 return (ioread16(address));
326}
327
328
329static void
330brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
331 u16 value)
332{
333 void __iomem *address = devinfo->tcm + mem_offset;
334
335 iowrite16(value, address);
336}
337
338
339static u16
340brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
341{
342 u16 *address = devinfo->idxbuf + mem_offset;
343
344 return (*(address));
345}
346
347
348static void
349brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
350 u16 value)
351{
352 u16 *address = devinfo->idxbuf + mem_offset;
353
354 *(address) = value;
355}
356
357
358static u32
359brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
360{
361 void __iomem *address = devinfo->tcm + mem_offset;
362
363 return (ioread32(address));
364}
365
366
367static void
368brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
369 u32 value)
370{
371 void __iomem *address = devinfo->tcm + mem_offset;
372
373 iowrite32(value, address);
374}
375
376
377static u32
378brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
379{
380 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
381
382 return (ioread32(addr));
383}
384
385
386static void
387brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
388 u32 value)
389{
390 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
391
392 iowrite32(value, addr);
393}
394
395
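/* Copy a host buffer into device TCM through the BAR1 mapping. The widest
 * MMIO access is chosen from the alignment of the destination, source and
 * length; otherwise the copy falls back to 16-bit or byte writes.
 */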
396static void
397brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
398 void *srcaddr, u32 len)
399{
400 void __iomem *address = devinfo->tcm + mem_offset;
401 __le32 *src32;
402 __le16 *src16;
403 u8 *src8;
404
405 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
406 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
407 src8 = (u8 *)srcaddr;
408 while (len) {
409 iowrite8(*src8, address);
410 address++;
411 src8++;
412 len--;
413 }
414 } else {
415 len = len / 2;
416 src16 = (__le16 *)srcaddr;
417 while (len) {
418 iowrite16(le16_to_cpu(*src16), address);
419 address += 2;
420 src16++;
421 len--;
422 }
423 }
424 } else {
425 len = len / 4;
426 src32 = (__le32 *)srcaddr;
427 while (len) {
428 iowrite32(le32_to_cpu(*src32), address);
429 address += 4;
430 src32++;
431 len--;
432 }
433 }
434}
435
436
437#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
438 CHIPCREGOFFS(reg), value)
439
440
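/* Move the BAR0 register window onto the backplane address of the requested
 * core, re-writing the window register once if the first write did not stick.
 */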
441static void
442brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
443{
444 const struct pci_dev *pdev = devinfo->pdev;
445 struct brcmf_core *core;
446 u32 bar0_win;
447
448 core = brcmf_chip_get_core(devinfo->ci, coreid);
449 if (core) {
450 bar0_win = core->base;
451 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
452 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
453 &bar0_win) == 0) {
454 if (bar0_win != core->base) {
455 bar0_win = core->base;
456 pci_write_config_dword(pdev,
457 BRCMF_PCIE_BAR0_WINDOW,
458 bar0_win);
459 }
460 }
461 } else {
462 brcmf_err("Unsupported core selected %x\n", coreid);
463 }
464}
465
466
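/* Reset the device via the chipcommon watchdog. ASPM is temporarily cleared
 * in the PCIe link status/control register around the reset, and each config
 * register listed in cfg_offset[] is then read and written back.
 */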
 467static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
 468{
469 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
470 BRCMF_PCIE_CFGREG_PM_CSR,
471 BRCMF_PCIE_CFGREG_MSI_CAP,
472 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
473 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
474 BRCMF_PCIE_CFGREG_MSI_DATA,
475 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
476 BRCMF_PCIE_CFGREG_RBAR_CTRL,
477 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
478 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
479 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
480 u32 i;
481 u32 val;
 482 u32 lsc;
483
484 if (!devinfo->ci)
485 return;
486
487 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
488 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
489 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
490 lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
491 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
492 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);
 493
494 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
495 WRITECC32(devinfo, watchdog, 4);
9e37f045
HM
496 msleep(100);
497
498 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
499 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
500 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
501 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);
502
503 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
504 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
505 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
506 cfg_offset[i]);
507 val = brcmf_pcie_read_reg32(devinfo,
508 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
509 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
510 cfg_offset[i], val);
511 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
512 val);
513 }
514}
515
516
517static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
518{
519 u32 config;
520
521 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
522 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
 523 brcmf_pcie_reset_device(devinfo);
524 /* BAR1 window may not be sized properly */
525 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
526 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
527 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
528 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
529
530 device_wakeup_enable(&devinfo->pdev->dev);
531}
532
533
534static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
535{
536 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
537 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
538 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
539 5);
540 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
541 0);
542 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
543 7);
544 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
545 0);
546 }
547 return 0;
548}
549
550
551static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
552 u32 resetintr)
553{
554 struct brcmf_core *core;
555
556 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
557 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
558 brcmf_chip_resetcore(core, 0, 0, 0);
559 }
560
 561 return !brcmf_chip_set_active(devinfo->ci, resetintr);
562}
563
564
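/* Write a host-to-device mailbox value into the shared TCM area and ring the
 * SB mailbox doorbell. If a previous value is still pending, poll for up to
 * roughly one second (100 x 10 ms) before giving up with -EIO.
 */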
 565static int
566brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
567{
568 struct brcmf_pcie_shared_info *shared;
569 u32 addr;
570 u32 cur_htod_mb_data;
571 u32 i;
572
573 shared = &devinfo->shared;
574 addr = shared->htod_mb_data_addr;
575 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
576
577 if (cur_htod_mb_data != 0)
578 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
579 cur_htod_mb_data);
580
581 i = 0;
582 while (cur_htod_mb_data != 0) {
583 msleep(10);
584 i++;
585 if (i > 100)
 586 return -EIO;
587 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
588 }
589
590 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
591 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
592 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
593
594 return 0;
595}
596
597
598static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
599{
600 struct brcmf_pcie_shared_info *shared;
601 u32 addr;
602 u32 dtoh_mb_data;
603
604 shared = &devinfo->shared;
605 addr = shared->dtoh_mb_data_addr;
606 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
607
608 if (!dtoh_mb_data)
609 return;
610
611 brcmf_pcie_write_tcm32(devinfo, addr, 0);
612
613 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
614 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
615 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
616 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
617 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
618 }
619 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
620 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
 621 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
622 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
623 if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
624 devinfo->mbdata_completed = true;
625 wake_up(&devinfo->mbdata_resp_wait);
626 }
 627 }
628}
629
630
631static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
632{
633 struct brcmf_pcie_shared_info *shared;
634 struct brcmf_pcie_console *console;
635 u32 addr;
636
637 shared = &devinfo->shared;
638 console = &shared->console;
639 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
640 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
641
642 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
643 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
644 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
645 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
646
 647 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
648 console->base_addr, console->buf_addr, console->bufsize);
649}
650
651
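/* Drain the firmware console ring buffer in TCM and emit complete lines
 * through pr_debug(). Only active when the FWCON debug level is enabled.
 */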
652static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
653{
654 struct brcmf_pcie_console *console;
655 u32 addr;
656 u8 ch;
657 u32 newidx;
658
659 if (!BRCMF_FWCON_ON())
660 return;
661
662 console = &devinfo->shared.console;
663 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
664 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
665 while (newidx != console->read_idx) {
666 addr = console->buf_addr + console->read_idx;
667 ch = brcmf_pcie_read_tcm8(devinfo, addr);
668 console->read_idx++;
669 if (console->read_idx == console->bufsize)
670 console->read_idx = 0;
671 if (ch == '\r')
672 continue;
673 console->log_str[console->log_idx] = ch;
674 console->log_idx++;
675 if ((ch != '\n') &&
676 (console->log_idx == (sizeof(console->log_str) - 2))) {
677 ch = '\n';
678 console->log_str[console->log_idx] = ch;
679 console->log_idx++;
680 }
681 if (ch == '\n') {
682 console->log_str[console->log_idx] = 0;
 683 pr_debug("CONSOLE: %s", console->log_str);
684 console->log_idx = 0;
685 }
686 }
687}
688
689
690static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
691{
692 u32 reg_value;
693
694 brcmf_dbg(PCIE, "RING !\n");
695 reg_value = brcmf_pcie_read_reg32(devinfo,
696 BRCMF_PCIE_PCIE2REG_MAILBOXINT);
697 reg_value |= BRCMF_PCIE2_INTB;
698 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
699 reg_value);
700}
701
702
703static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
704{
705 brcmf_dbg(PCIE, "RING !\n");
 706 /* Any arbitrary value will do, let's use 1 */
707 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
708}
709
710
711static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
712{
713 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
714 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
715 0);
716 else
717 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
718 0);
719}
720
721
722static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
723{
724 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
725 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
726 BRCMF_PCIE_INT_DEF);
727 else
728 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
729 BRCMF_PCIE_MB_INT_D2H_DB |
730 BRCMF_PCIE_MB_INT_FN0_0 |
731 BRCMF_PCIE_MB_INT_FN0_1);
732}
733
734
735static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
736{
737 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
738 u32 status;
739
740 status = 0;
741 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
742 if (status) {
743 brcmf_pcie_intr_disable(devinfo);
744 brcmf_dbg(PCIE, "Enter\n");
745 return IRQ_WAKE_THREAD;
746 }
747 return IRQ_NONE;
748}
749
750
751static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
752{
753 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
754
755 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
756 brcmf_pcie_intr_disable(devinfo);
757 brcmf_dbg(PCIE, "Enter\n");
758 return IRQ_WAKE_THREAD;
759 }
760 return IRQ_NONE;
761}
762
763
764static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
765{
766 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
767 const struct pci_dev *pdev = devinfo->pdev;
768 u32 status;
769
770 devinfo->in_irq = true;
771 status = 0;
772 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
773 brcmf_dbg(PCIE, "Enter %x\n", status);
774 if (status) {
775 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
776 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
777 brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
778 }
779 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
780 brcmf_pcie_intr_enable(devinfo);
781 devinfo->in_irq = false;
782 return IRQ_HANDLED;
783}
784
785
786static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
787{
788 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
789 u32 status;
790
791 devinfo->in_irq = true;
792 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
793 brcmf_dbg(PCIE, "Enter %x\n", status);
794 if (status) {
795 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
796 status);
797 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
798 BRCMF_PCIE_MB_INT_FN0_1))
799 brcmf_pcie_handle_mb_data(devinfo);
800 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
801 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
802 brcmf_proto_msgbuf_rx_trigger(
803 &devinfo->pdev->dev);
804 }
805 }
806 brcmf_pcie_bus_console_read(devinfo);
807 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
808 brcmf_pcie_intr_enable(devinfo);
809 devinfo->in_irq = false;
810 return IRQ_HANDLED;
811}
812
813
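/* Enable MSI and install a threaded interrupt handler; the quick-check and
 * thread handlers are selected by the PCIe core generation (v1 or v2).
 */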
814static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
815{
816 struct pci_dev *pdev;
817
818 pdev = devinfo->pdev;
819
820 brcmf_pcie_intr_disable(devinfo);
821
822 brcmf_dbg(PCIE, "Enter\n");
823 /* is it a v1 or v2 implementation */
824 devinfo->irq_requested = false;
 825 pci_enable_msi(pdev);
826 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
827 if (request_threaded_irq(pdev->irq,
828 brcmf_pcie_quick_check_isr_v1,
829 brcmf_pcie_isr_thread_v1,
830 IRQF_SHARED, "brcmf_pcie_intr",
831 devinfo)) {
 832 pci_disable_msi(pdev);
833 brcmf_err("Failed to request IRQ %d\n", pdev->irq);
834 return -EIO;
835 }
836 } else {
837 if (request_threaded_irq(pdev->irq,
838 brcmf_pcie_quick_check_isr_v2,
839 brcmf_pcie_isr_thread_v2,
840 IRQF_SHARED, "brcmf_pcie_intr",
841 devinfo)) {
 842 pci_disable_msi(pdev);
843 brcmf_err("Failed to request IRQ %d\n", pdev->irq);
844 return -EIO;
845 }
846 }
847 devinfo->irq_requested = true;
848 devinfo->irq_allocated = true;
849 return 0;
850}
851
852
853static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
854{
855 struct pci_dev *pdev;
856 u32 status;
857 u32 count;
858
859 if (!devinfo->irq_allocated)
860 return;
861
862 pdev = devinfo->pdev;
863
864 brcmf_pcie_intr_disable(devinfo);
865 if (!devinfo->irq_requested)
866 return;
867 devinfo->irq_requested = false;
868 free_irq(pdev->irq, devinfo);
 869 pci_disable_msi(pdev);
870
871 msleep(50);
872 count = 0;
873 while ((devinfo->in_irq) && (count < 20)) {
874 msleep(50);
875 count++;
876 }
877 if (devinfo->in_irq)
878 brcmf_err("Still in IRQ (processing) !!!\n");
879
880 if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
881 status = 0;
882 pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
883 pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
884 } else {
885 status = brcmf_pcie_read_reg32(devinfo,
886 BRCMF_PCIE_PCIE2REG_MAILBOXINT);
887 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
888 status);
889 }
890 devinfo->irq_allocated = false;
891}
892
893
894static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
895{
896 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
897 struct brcmf_pciedev_info *devinfo = ring->devinfo;
898 struct brcmf_commonring *commonring = &ring->commonring;
899
900 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
901 return -EIO;
902
903 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
904 commonring->w_ptr, ring->id);
905
 906 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
907
908 return 0;
909}
910
911
912static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
913{
914 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
915 struct brcmf_pciedev_info *devinfo = ring->devinfo;
916 struct brcmf_commonring *commonring = &ring->commonring;
917
918 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
919 return -EIO;
920
921 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
922 commonring->r_ptr, ring->id);
923
 924 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
925
926 return 0;
927}
928
929
930static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
931{
932 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
933 struct brcmf_pciedev_info *devinfo = ring->devinfo;
934
935 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
936 return -EIO;
937
938 devinfo->ringbell(devinfo);
939
940 return 0;
941}
942
943
944static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
945{
946 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
947 struct brcmf_pciedev_info *devinfo = ring->devinfo;
948 struct brcmf_commonring *commonring = &ring->commonring;
949
950 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
951 return -EIO;
952
 953 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
954
955 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
956 commonring->w_ptr, ring->id);
957
958 return 0;
959}
960
961
962static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
963{
964 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
965 struct brcmf_pciedev_info *devinfo = ring->devinfo;
966 struct brcmf_commonring *commonring = &ring->commonring;
967
968 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
969 return -EIO;
970
 971 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
972
973 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
974 commonring->r_ptr, ring->id);
975
976 return 0;
977}
978
979
980static void *
981brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
982 u32 size, u32 tcm_dma_phys_addr,
983 dma_addr_t *dma_handle)
984{
985 void *ring;
 986 u64 address;
987
988 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
989 GFP_KERNEL);
990 if (!ring)
991 return NULL;
992
 993 address = (u64)*dma_handle;
994 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
995 address & 0xffffffff);
996 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
997
998 memset(ring, 0, size);
999
1000 return (ring);
1001}
1002
1003
1004static struct brcmf_pcie_ringbuf *
1005brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1006 u32 tcm_ring_phys_addr)
1007{
1008 void *dma_buf;
1009 dma_addr_t dma_handle;
1010 struct brcmf_pcie_ringbuf *ring;
1011 u32 size;
1012 u32 addr;
1013
1014 size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
1015 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1016 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1017 &dma_handle);
1018 if (!dma_buf)
1019 return NULL;
1020
1021 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1022 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1023 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1024 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
1025
1026 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1027 if (!ring) {
1028 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1029 dma_handle);
1030 return NULL;
1031 }
1032 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1033 brcmf_ring_itemsize[ring_id], dma_buf);
1034 ring->dma_handle = dma_handle;
1035 ring->devinfo = devinfo;
1036 brcmf_commonring_register_cb(&ring->commonring,
1037 brcmf_pcie_ring_mb_ring_bell,
1038 brcmf_pcie_ring_mb_update_rptr,
1039 brcmf_pcie_ring_mb_update_wptr,
1040 brcmf_pcie_ring_mb_write_rptr,
1041 brcmf_pcie_ring_mb_write_wptr, ring);
1042
1043 return (ring);
1044}
1045
1046
1047static void brcmf_pcie_release_ringbuffer(struct device *dev,
1048 struct brcmf_pcie_ringbuf *ring)
1049{
1050 void *dma_buf;
1051 u32 size;
1052
1053 if (!ring)
1054 return;
1055
1056 dma_buf = ring->commonring.buf_addr;
1057 if (dma_buf) {
1058 size = ring->commonring.depth * ring->commonring.item_len;
1059 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1060 }
1061 kfree(ring);
1062}
1063
1064
1065static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1066{
1067 u32 i;
1068
1069 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1070 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1071 devinfo->shared.commonrings[i]);
1072 devinfo->shared.commonrings[i] = NULL;
1073 }
1074 kfree(devinfo->shared.flowrings);
1075 devinfo->shared.flowrings = NULL;
1076 if (devinfo->idxbuf) {
1077 dma_free_coherent(&devinfo->pdev->dev,
1078 devinfo->idxbuf_sz,
1079 devinfo->idxbuf,
1080 devinfo->idxbuf_dmahandle);
1081 devinfo->idxbuf = NULL;
1082 }
1083}
1084
1085
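/* Set up the common message rings and flow rings described by the ring info
 * structure in shared TCM. When the firmware supports DMA index updates a
 * single coherent buffer is allocated for all read/write indices; otherwise
 * the indices are read and written directly in TCM.
 */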
1086static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1087{
1088 struct brcmf_pcie_ringbuf *ring;
1089 struct brcmf_pcie_ringbuf *rings;
1090 u32 ring_addr;
1091 u32 d2h_w_idx_ptr;
1092 u32 d2h_r_idx_ptr;
1093 u32 h2d_w_idx_ptr;
1094 u32 h2d_r_idx_ptr;
1095 u32 addr;
1096 u32 ring_mem_ptr;
1097 u32 i;
1098 u64 address;
1099 u32 bufsz;
 1100 u16 max_sub_queues;
 1101 u8 idx_offset;
1102
1103 ring_addr = devinfo->shared.ring_info_addr;
1104 brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
1105 addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
1106 max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
1107
1108 if (devinfo->dma_idx_sz != 0) {
1109 bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
1110 devinfo->dma_idx_sz * 2;
1111 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1112 &devinfo->idxbuf_dmahandle,
1113 GFP_KERNEL);
1114 if (!devinfo->idxbuf)
1115 devinfo->dma_idx_sz = 0;
1116 }
 1117
1118 if (devinfo->dma_idx_sz == 0) {
1119 addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
1120 d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1121 addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
1122 d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1123 addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
1124 h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1125 addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
1126 h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1127 idx_offset = sizeof(u32);
1128 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1129 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1130 brcmf_dbg(PCIE, "Using TCM indices\n");
1131 } else {
1132 memset(devinfo->idxbuf, 0, bufsz);
1133 devinfo->idxbuf_sz = bufsz;
1134 idx_offset = devinfo->dma_idx_sz;
1135 devinfo->write_ptr = brcmf_pcie_write_idx;
1136 devinfo->read_ptr = brcmf_pcie_read_idx;
1137
1138 h2d_w_idx_ptr = 0;
1139 addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
1140 address = (u64)devinfo->idxbuf_dmahandle;
1141 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1142 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1143
1144 h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
1145 addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
1146 address += max_sub_queues * idx_offset;
1147 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1148 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1149
1150 d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
1151 addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
1152 address += max_sub_queues * idx_offset;
1153 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1154 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1155
1156 d2h_r_idx_ptr = d2h_w_idx_ptr +
1157 BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
1158 addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
1159 address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
1160 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1161 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1162 brcmf_dbg(PCIE, "Using host memory indices\n");
1163 }
1164
1165 addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
1166 ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
1167
1168 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1169 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1170 if (!ring)
1171 goto fail;
1172 ring->w_idx_addr = h2d_w_idx_ptr;
1173 ring->r_idx_addr = h2d_r_idx_ptr;
1174 ring->id = i;
1175 devinfo->shared.commonrings[i] = ring;
1176
1177 h2d_w_idx_ptr += idx_offset;
1178 h2d_r_idx_ptr += idx_offset;
1179 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1180 }
1181
1182 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1183 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1184 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1185 if (!ring)
1186 goto fail;
1187 ring->w_idx_addr = d2h_w_idx_ptr;
1188 ring->r_idx_addr = d2h_r_idx_ptr;
1189 ring->id = i;
1190 devinfo->shared.commonrings[i] = ring;
1191
1192 d2h_w_idx_ptr += idx_offset;
1193 d2h_r_idx_ptr += idx_offset;
1194 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1195 }
1196
1197 devinfo->shared.nrof_flowrings =
1198 max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
1199 rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
1200 GFP_KERNEL);
1201 if (!rings)
1202 goto fail;
1203
1204 brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
1205 devinfo->shared.nrof_flowrings);
1206
1207 for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
1208 ring = &rings[i];
1209 ring->devinfo = devinfo;
1210 ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
1211 brcmf_commonring_register_cb(&ring->commonring,
1212 brcmf_pcie_ring_mb_ring_bell,
1213 brcmf_pcie_ring_mb_update_rptr,
1214 brcmf_pcie_ring_mb_update_wptr,
1215 brcmf_pcie_ring_mb_write_rptr,
1216 brcmf_pcie_ring_mb_write_wptr,
1217 ring);
1218 ring->w_idx_addr = h2d_w_idx_ptr;
1219 ring->r_idx_addr = h2d_r_idx_ptr;
1220 h2d_w_idx_ptr += idx_offset;
1221 h2d_r_idx_ptr += idx_offset;
1222 }
1223 devinfo->shared.flowrings = rings;
1224
1225 return 0;
1226
1227fail:
 1228 brcmf_err("Allocating ring buffers failed\n");
1229 brcmf_pcie_release_ringbuffers(devinfo);
1230 return -ENOMEM;
1231}
1232
1233
1234static void
1235brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1236{
1237 if (devinfo->shared.scratch)
1238 dma_free_coherent(&devinfo->pdev->dev,
1239 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1240 devinfo->shared.scratch,
1241 devinfo->shared.scratch_dmahandle);
1242 if (devinfo->shared.ringupd)
1243 dma_free_coherent(&devinfo->pdev->dev,
1244 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1245 devinfo->shared.ringupd,
1246 devinfo->shared.ringupd_dmahandle);
1247}
1248
1249static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1250{
 1251 u64 address;
1252 u32 addr;
1253
1254 devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
1255 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1256 &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
1257 if (!devinfo->shared.scratch)
1258 goto fail;
1259
1260 memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1261
1262 addr = devinfo->shared.tcm_base_address +
1263 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
 1264 address = (u64)devinfo->shared.scratch_dmahandle;
1265 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1266 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1267 addr = devinfo->shared.tcm_base_address +
1268 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1269 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1270
1271 devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
1272 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1273 &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
1274 if (!devinfo->shared.ringupd)
1275 goto fail;
1276
1277 memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1278
1279 addr = devinfo->shared.tcm_base_address +
1280 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
 1281 address = (u64)devinfo->shared.ringupd_dmahandle;
1282 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1283 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1284 addr = devinfo->shared.tcm_base_address +
1285 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1286 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1287 return 0;
1288
1289fail:
1290 brcmf_err("Allocating scratch buffers failed\n");
1291 brcmf_pcie_release_scratchbuffers(devinfo);
1292 return -ENOMEM;
1293}
1294
1295
1296static void brcmf_pcie_down(struct device *dev)
1297{
1298}
1299
1300
1301static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1302{
1303 return 0;
1304}
1305
1306
1307static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1308 uint len)
1309{
1310 return 0;
1311}
1312
1313
1314static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1315 uint len)
1316{
1317 return 0;
1318}
1319
1320
1321static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1322{
1323 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1324 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1325 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1326
1327 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1328 devinfo->wowl_enabled = enabled;
1329 if (enabled)
1330 device_set_wakeup_enable(&devinfo->pdev->dev, true);
1331 else
1332 device_set_wakeup_enable(&devinfo->pdev->dev, false);
1333}
1334
1335
1336static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1337 .txdata = brcmf_pcie_tx,
1338 .stop = brcmf_pcie_down,
1339 .txctl = brcmf_pcie_tx_ctlpkt,
1340 .rxctl = brcmf_pcie_rx_ctlpkt,
 1341 .wowl_config = brcmf_pcie_wowl_config,
1342};
1343
1344
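/* Parse the shared RAM structure published by the firmware: protocol version,
 * DMA index support, rx buffer post limit, mailbox and ring info addresses.
 */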
1345static int
1346brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1347 u32 sharedram_addr)
1348{
1349 struct brcmf_pcie_shared_info *shared;
1350 u32 addr;
1351 u32 version;
1352
1353 shared = &devinfo->shared;
1354 shared->tcm_base_address = sharedram_addr;
1355
1356 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1357 version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
1358 brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
1359 if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1360 (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1361 brcmf_err("Unsupported PCIE version %d\n", version);
1362 return -EINVAL;
1363 }
 1364
 1365 /* check if firmware supports DMA indices */
1366 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1367 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1368 devinfo->dma_idx_sz = sizeof(u16);
1369 else
1370 devinfo->dma_idx_sz = sizeof(u32);
1371 }
1372
1373 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1374 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1375 if (shared->max_rxbufpost == 0)
1376 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1377
1378 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1379 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1380
1381 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1382 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1383
1384 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1385 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1386
1387 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1388 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1389
1390 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1391 shared->max_rxbufpost, shared->rx_dataoffset);
1392
1393 brcmf_pcie_bus_console_init(devinfo);
1394
1395 return 0;
1396}
1397
1398
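/* Select firmware and NVRAM file names based on the chip id, optionally
 * prefixed with the path given by the brcmf_firmware_path module parameter.
 */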
1399static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
1400{
1401 char *fw_name;
1402 char *nvram_name;
1403 uint fw_len, nv_len;
1404 char end;
1405
1406 brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
1407 devinfo->ci->chiprev);
1408
1409 switch (devinfo->ci->chip) {
1410 case BRCM_CC_43602_CHIP_ID:
1411 fw_name = BRCMF_PCIE_43602_FW_NAME;
1412 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
1413 break;
1414 case BRCM_CC_4356_CHIP_ID:
1415 fw_name = BRCMF_PCIE_4356_FW_NAME;
1416 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
1417 break;
1418 case BRCM_CC_43567_CHIP_ID:
1419 case BRCM_CC_43569_CHIP_ID:
1420 case BRCM_CC_43570_CHIP_ID:
1421 fw_name = BRCMF_PCIE_43570_FW_NAME;
1422 nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
1423 break;
1424 case BRCM_CC_4358_CHIP_ID:
1425 fw_name = BRCMF_PCIE_4358_FW_NAME;
1426 nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
1427 break;
1428 default:
1429 brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
1430 return -ENODEV;
1431 }
1432
1433 fw_len = sizeof(devinfo->fw_name) - 1;
1434 nv_len = sizeof(devinfo->nvram_name) - 1;
1435 /* check if firmware path is provided by module parameter */
1436 if (brcmf_firmware_path[0] != '\0') {
1437 strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
1438 strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
1439 fw_len -= strlen(devinfo->fw_name);
1440 nv_len -= strlen(devinfo->nvram_name);
1441
1442 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
1443 if (end != '/') {
1444 strncat(devinfo->fw_name, "/", fw_len);
1445 strncat(devinfo->nvram_name, "/", nv_len);
1446 fw_len--;
1447 nv_len--;
1448 }
1449 }
1450 strncat(devinfo->fw_name, fw_name, fw_len);
1451 strncat(devinfo->nvram_name, nvram_name, nv_len);
1452
1453 return 0;
1454}
1455
1456
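/* Download firmware (and NVRAM, if available) into device RAM, restart the
 * ARM core and poll the last word of RAM until the firmware publishes the
 * shared RAM address there, or the timeout expires.
 */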
1457static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1458 const struct firmware *fw, void *nvram,
1459 u32 nvram_len)
1460{
1461 u32 sharedram_addr;
1462 u32 sharedram_addr_written;
1463 u32 loop_counter;
1464 int err;
1465 u32 address;
1466 u32 resetintr;
1467
1468 devinfo->ringbell = brcmf_pcie_ringbell_v2;
1469 devinfo->generic_corerev = BRCMF_PCIE_GENREV2;
1470
1471 brcmf_dbg(PCIE, "Halt ARM.\n");
1472 err = brcmf_pcie_enter_download_state(devinfo);
1473 if (err)
1474 return err;
1475
1476 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1477 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1478 (void *)fw->data, fw->size);
1479
1480 resetintr = get_unaligned_le32(fw->data);
1481 release_firmware(fw);
1482
 1483 /* Reset the last 4 bytes of RAM; this word will hold the shared
 1484 * area address and indicates when the firmware is running.
 1485 */
1486 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1487
1488 if (nvram) {
1489 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1490 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1491 nvram_len;
1492 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1493 brcmf_fw_nvram_free(nvram);
1494 } else {
1495 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1496 devinfo->nvram_name);
1497 }
1498
1499 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1500 devinfo->ci->ramsize -
1501 4);
1502 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1503 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1504 if (err)
1505 return err;
1506
1507 brcmf_dbg(PCIE, "Wait for FW init\n");
1508 sharedram_addr = sharedram_addr_written;
1509 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1510 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1511 msleep(50);
1512 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1513 devinfo->ci->ramsize -
1514 4);
1515 loop_counter--;
1516 }
1517 if (sharedram_addr == sharedram_addr_written) {
1518 brcmf_err("FW failed to initialize\n");
1519 return -ENODEV;
1520 }
1521 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1522
1523 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1524}
1525
1526
1527static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1528{
1529 struct pci_dev *pdev;
1530 int err;
1531 phys_addr_t bar0_addr, bar1_addr;
1532 ulong bar1_size;
1533
1534 pdev = devinfo->pdev;
1535
1536 err = pci_enable_device(pdev);
1537 if (err) {
1538 brcmf_err("pci_enable_device failed err=%d\n", err);
1539 return err;
1540 }
1541
1542 pci_set_master(pdev);
1543
1544 /* Bar-0 mapped address */
1545 bar0_addr = pci_resource_start(pdev, 0);
1546 /* Bar-1 mapped address */
1547 bar1_addr = pci_resource_start(pdev, 2);
1548 /* read Bar-1 mapped memory range */
1549 bar1_size = pci_resource_len(pdev, 2);
1550 if ((bar1_size == 0) || (bar1_addr == 0)) {
1551 brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1552 bar1_size, (unsigned long long)bar1_addr);
1553 return -EINVAL;
1554 }
1555
1556 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1557 devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
1558 devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;
1559
1560 if (!devinfo->regs || !devinfo->tcm) {
1561 brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
1562 devinfo->tcm);
1563 return -EINVAL;
1564 }
1565 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1566 devinfo->regs, (unsigned long long)bar0_addr);
1567 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
1568 devinfo->tcm, (unsigned long long)bar1_addr);
1569
1570 return 0;
1571}
1572
1573
1574static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1575{
1576 if (devinfo->tcm)
1577 iounmap(devinfo->tcm);
1578 if (devinfo->regs)
1579 iounmap(devinfo->regs);
1580
1581 pci_disable_device(devinfo->pdev);
1582}
1583
1584
1585static int brcmf_pcie_attach_bus(struct device *dev)
1586{
1587 int ret;
1588
1589 /* Attach to the common driver interface */
1590 ret = brcmf_attach(dev);
1591 if (ret) {
1592 brcmf_err("brcmf_attach failed\n");
1593 } else {
1594 ret = brcmf_bus_start(dev);
1595 if (ret)
1596 brcmf_err("dongle is not responding\n");
1597 }
1598
1599 return ret;
1600}
1601
1602
1603static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1604{
1605 u32 ret_addr;
1606
1607 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1608 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1609 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1610
1611 return ret_addr;
1612}
1613
1614
1615static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1616{
1617 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1618
1619 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1620 return brcmf_pcie_read_reg32(devinfo, addr);
1621}
1622
1623
1624static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1625{
1626 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1627
1628 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1629 brcmf_pcie_write_reg32(devinfo, addr, value);
1630}
1631
1632
1633static int brcmf_pcie_buscoreprep(void *ctx)
1634{
 1635 return brcmf_pcie_get_resource(ctx);
1636}
1637
1638
1639static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1640 u32 rstvec)
1641{
1642 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1643
1644 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1645}
1646
1647
1648static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1649 .prepare = brcmf_pcie_buscoreprep,
 1650 .activate = brcmf_pcie_buscore_activate,
1651 .read32 = brcmf_pcie_buscore_read32,
1652 .write32 = brcmf_pcie_buscore_write32,
1653};
1654
1655static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
1656 void *nvram, u32 nvram_len)
1657{
1658 struct brcmf_bus *bus = dev_get_drvdata(dev);
1659 struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
1660 struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
1661 struct brcmf_commonring **flowrings;
1662 int ret;
1663 u32 i;
1664
1665 brcmf_pcie_attach(devinfo);
1666
1667 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1668 if (ret)
1669 goto fail;
1670
1671 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1672
1673 ret = brcmf_pcie_init_ringbuffers(devinfo);
1674 if (ret)
1675 goto fail;
1676
1677 ret = brcmf_pcie_init_scratchbuffers(devinfo);
1678 if (ret)
1679 goto fail;
1680
1681 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1682 ret = brcmf_pcie_request_irq(devinfo);
1683 if (ret)
1684 goto fail;
1685
1686 /* hook the commonrings in the bus structure. */
1687 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1688 bus->msgbuf->commonrings[i] =
1689 &devinfo->shared.commonrings[i]->commonring;
1690
 1691 flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
1692 GFP_KERNEL);
1693 if (!flowrings)
1694 goto fail;
1695
1696 for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
1697 flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1698 bus->msgbuf->flowrings = flowrings;
1699
1700 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1701 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1702 bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
1703
1704 init_waitqueue_head(&devinfo->mbdata_resp_wait);
1705
1706 brcmf_pcie_intr_enable(devinfo);
1707 if (brcmf_pcie_attach_bus(bus->dev) == 0)
1708 return;
1709
1710 brcmf_pcie_bus_console_read(devinfo);
1711
1712fail:
1713 device_release_driver(dev);
1714}
1715
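/* PCI probe: attach the chip through the buscore ops, allocate the bus and
 * msgbuf structures, resolve the firmware names and start the asynchronous
 * firmware request; brcmf_pcie_setup() completes initialization once the
 * firmware has been loaded.
 */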
1716static int
1717brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1718{
1719 int ret;
1720 struct brcmf_pciedev_info *devinfo;
1721 struct brcmf_pciedev *pcie_bus_dev;
1722 struct brcmf_bus *bus;
1723 u16 domain_nr;
1724 u16 bus_nr;
 1725
1726 domain_nr = pci_domain_nr(pdev->bus) + 1;
1727 bus_nr = pdev->bus->number;
1728 brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
1729 domain_nr, bus_nr);
1730
1731 ret = -ENOMEM;
1732 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1733 if (devinfo == NULL)
1734 return ret;
1735
1736 devinfo->pdev = pdev;
1737 pcie_bus_dev = NULL;
1738 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1739 if (IS_ERR(devinfo->ci)) {
1740 ret = PTR_ERR(devinfo->ci);
1741 devinfo->ci = NULL;
1742 goto fail;
1743 }
1744
1745 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1746 if (pcie_bus_dev == NULL) {
1747 ret = -ENOMEM;
1748 goto fail;
1749 }
1750
1751 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1752 if (!bus) {
1753 ret = -ENOMEM;
1754 goto fail;
1755 }
1756 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1757 if (!bus->msgbuf) {
1758 ret = -ENOMEM;
1759 kfree(bus);
1760 goto fail;
1761 }
1762
1763 /* hook it all together. */
1764 pcie_bus_dev->devinfo = devinfo;
1765 pcie_bus_dev->bus = bus;
1766 bus->dev = &pdev->dev;
1767 bus->bus_priv.pcie = pcie_bus_dev;
1768 bus->ops = &brcmf_pcie_bus_ops;
1769 bus->proto_type = BRCMF_PROTO_MSGBUF;
1770 bus->chip = devinfo->coreid;
 1771 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1772 dev_set_drvdata(&pdev->dev, bus);
1773
1774 ret = brcmf_pcie_get_fwnames(devinfo);
1775 if (ret)
1776 goto fail_bus;
1777
1778 ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
1779 BRCMF_FW_REQ_NV_OPTIONAL,
1780 devinfo->fw_name, devinfo->nvram_name,
1781 brcmf_pcie_setup, domain_nr, bus_nr);
1782 if (ret == 0)
1783 return 0;
1784fail_bus:
1785 kfree(bus->msgbuf);
1786 kfree(bus);
1787fail:
1788 brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
1789 brcmf_pcie_release_resource(devinfo);
1790 if (devinfo->ci)
1791 brcmf_chip_detach(devinfo->ci);
1792 kfree(pcie_bus_dev);
1793 kfree(devinfo);
1794 return ret;
1795}
1796
1797
1798static void
1799brcmf_pcie_remove(struct pci_dev *pdev)
1800{
1801 struct brcmf_pciedev_info *devinfo;
1802 struct brcmf_bus *bus;
1803
1804 brcmf_dbg(PCIE, "Enter\n");
1805
1806 bus = dev_get_drvdata(&pdev->dev);
1807 if (bus == NULL)
1808 return;
1809
1810 devinfo = bus->bus_priv.pcie->devinfo;
1811
1812 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1813 if (devinfo->ci)
1814 brcmf_pcie_intr_disable(devinfo);
1815
1816 brcmf_detach(&pdev->dev);
 1817 brcmf_pcie_reset_device(devinfo);
1818
1819 kfree(bus->bus_priv.pcie);
1820 kfree(bus->msgbuf->flowrings);
1821 kfree(bus->msgbuf);
1822 kfree(bus);
1823
1824 brcmf_pcie_release_irq(devinfo);
1825 brcmf_pcie_release_scratchbuffers(devinfo);
1826 brcmf_pcie_release_ringbuffers(devinfo);
 1827 brcmf_pcie_reset_device(devinfo);
1828 brcmf_pcie_release_resource(devinfo);
1829
1830 if (devinfo->ci)
1831 brcmf_chip_detach(devinfo->ci);
1832
1833 kfree(devinfo);
1834 dev_set_drvdata(&pdev->dev, NULL);
1835}
1836
1837
1838#ifdef CONFIG_PM
1839
1840
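/* Suspend: bring the bus down, tell the firmware to enter D3 via the mailbox
 * and wait for the D3 ACK. Without WOWL the device is torn down completely;
 * with WOWL enabled the PCI state is saved and the device put to sleep.
 */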
1841static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
1842{
1843 struct brcmf_pciedev_info *devinfo;
1844 struct brcmf_bus *bus;
1845 int err;
1846
1847 brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);
1848
1849 bus = dev_get_drvdata(&pdev->dev);
1850 devinfo = bus->bus_priv.pcie->devinfo;
1851
1852 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
1853
1854 devinfo->mbdata_completed = false;
1855 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
1856
1857 wait_event_timeout(devinfo->mbdata_resp_wait,
1858 devinfo->mbdata_completed,
1859 msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
1860 if (!devinfo->mbdata_completed) {
1861 brcmf_err("Timeout on response for entering D3 substate\n");
1862 return -EIO;
1863 }
 1864 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);
1865
1866 err = pci_save_state(pdev);
 1867 if (err)
 1868 brcmf_err("pci_save_state failed, err=%d\n", err);
1869 if ((err) || (!devinfo->wowl_enabled)) {
1870 brcmf_chip_detach(devinfo->ci);
1871 devinfo->ci = NULL;
1872 brcmf_pcie_remove(pdev);
1873 return 0;
1874 }
1875
1876 return pci_prepare_to_sleep(pdev);
1877}
1878
1879static int brcmf_pcie_resume(struct pci_dev *pdev)
1880{
1881 struct brcmf_pciedev_info *devinfo;
1882 struct brcmf_bus *bus;
1883 int err;
1884
1885 bus = dev_get_drvdata(&pdev->dev);
1886 brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);
1887
1888 err = pci_set_power_state(pdev, PCI_D0);
1889 if (err) {
1890 brcmf_err("pci_set_power_state failed, err=%d\n", err);
 1891 goto cleanup;
1892 }
1893 pci_restore_state(pdev);
1894 pci_enable_wake(pdev, PCI_D3hot, false);
1895 pci_enable_wake(pdev, PCI_D3cold, false);
1896
1897 /* Check if device is still up and running, if so we are ready */
1898 if (bus) {
1899 devinfo = bus->bus_priv.pcie->devinfo;
1900 if (brcmf_pcie_read_reg32(devinfo,
1901 BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
1902 if (brcmf_pcie_send_mb_data(devinfo,
1903 BRCMF_H2D_HOST_D0_INFORM))
1904 goto cleanup;
1905 brcmf_dbg(PCIE, "Hot resume, continue....\n");
1906 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
 1907 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
1908 brcmf_pcie_intr_enable(devinfo);
1909 return 0;
1910 }
1911 }
 1912
1913cleanup:
1914 if (bus) {
1915 devinfo = bus->bus_priv.pcie->devinfo;
1916 brcmf_chip_detach(devinfo->ci);
1917 devinfo->ci = NULL;
1918 brcmf_pcie_remove(pdev);
1919 }
1920 err = brcmf_pcie_probe(pdev, NULL);
1921 if (err)
1922 brcmf_err("probe after resume failed, err=%d\n", err);
1923
1924 return err;
1925}
1926
1927
1928#endif /* CONFIG_PM */
1929
1930
1931#define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
1932 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
1933
1934static struct pci_device_id brcmf_pcie_devid_table[] = {
1935 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
1936 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
1937 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
 1938 BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
 1939 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
1940 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
1941 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
 1942 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
1943 { /* end: all zeroes */ }
1944};
1945
1946
1947MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
1948
1949
1950static struct pci_driver brcmf_pciedrvr = {
1951 .node = {},
1952 .name = KBUILD_MODNAME,
1953 .id_table = brcmf_pcie_devid_table,
1954 .probe = brcmf_pcie_probe,
1955 .remove = brcmf_pcie_remove,
1956#ifdef CONFIG_PM
1957 .suspend = brcmf_pcie_suspend,
1958 .resume = brcmf_pcie_resume
1959#endif /* CONFIG_PM */
1960};
1961
1962
1963void brcmf_pcie_register(void)
1964{
1965 int err;
1966
1967 brcmf_dbg(PCIE, "Enter\n");
1968 err = pci_register_driver(&brcmf_pciedrvr);
1969 if (err)
1970 brcmf_err("PCIE driver registration failed, err=%d\n", err);
1971}
1972
1973
1974void brcmf_pcie_exit(void)
1975{
1976 brcmf_dbg(PCIE, "Enter\n");
1977 pci_unregister_driver(&brcmf_pciedrvr);
1978}