Commit | Line | Data |
---|---|---|
9e37f045 HM |
1 | /* Copyright (c) 2014 Broadcom Corporation |
2 | * | |
3 | * Permission to use, copy, modify, and/or distribute this software for any | |
4 | * purpose with or without fee is hereby granted, provided that the above | |
5 | * copyright notice and this permission notice appear in all copies. | |
6 | * | |
7 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
8 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
9 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
10 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
11 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION | |
12 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | |
13 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
14 | */ | |
15 | ||
16 | #include <linux/kernel.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/firmware.h> | |
19 | #include <linux/pci.h> | |
20 | #include <linux/vmalloc.h> | |
21 | #include <linux/delay.h> | |
22 | #include <linux/unaligned/access_ok.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/bcma/bcma.h> | |
25 | #include <linux/sched.h> | |
26 | ||
27 | #include <soc.h> | |
28 | #include <chipcommon.h> | |
29 | #include <brcmu_utils.h> | |
30 | #include <brcmu_wifi.h> | |
31 | #include <brcm_hw_ids.h> | |
32 | ||
33 | #include "dhd_dbg.h" | |
34 | #include "dhd_bus.h" | |
35 | #include "commonring.h" | |
36 | #include "msgbuf.h" | |
37 | #include "pcie.h" | |
38 | #include "firmware.h" | |
39 | #include "chip.h" | |
40 | ||
41 | ||
/* Bus state: DOWN until the firmware is downloaded and running, UP after */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
46 | ||
47 | ||
/* Firmware and NVRAM image names per supported chip */
#define BRCMF_PCIE_43602_FW_NAME		"brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME		"brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4354_FW_NAME			"brcm/brcmfmac4354-pcie.bin"
#define BRCMF_PCIE_4354_NVRAM_NAME		"brcm/brcmfmac4354-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME			"brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"

#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

/* sizes of the BAR1 (TCM) and BAR0 (register) mappings */
#define BRCMF_PCIE_TCM_MAP_SIZE			(4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
/* NOTE(review): "BARO" is a typo for "BAR0"; identifier kept as-is since
 * renaming it would break any external references.
 */
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000

/* ARM CR4 core registers used during firmware download */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

/* gen-rev 1 interrupt registers, reached through PCI config space */
#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

/* gen-rev 2 PCIe core register offsets (accessed via BAR0 regs) */
#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX		0x140

#define BRCMF_PCIE_GENREV1			1
#define BRCMF_PCIE_GENREV2			2

#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

/* mailbox interrupt cause/mask bits (gen-rev 2) */
#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

/* all device-to-host doorbell causes */
#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

/* supported range of the firmware shared-structure protocol version */
#define BRCMF_PCIE_MIN_SHARED_VERSION		4
#define BRCMF_PCIE_MAX_SHARED_VERSION		5
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT	0x4000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000

/* byte offsets within the firmware-published shared structure in TCM */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

/* byte offsets within the ring-info structure */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

/* byte offsets within a ring-mem descriptor */
#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

/* byte offsets of the ring read/write index pointers */
#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET	4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET	8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET	12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET	16
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET	0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES	52

#define BRCMF_DEF_MAX_RXBUFPOST			255

/* byte offsets within the firmware console structure */
#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

/* device-to-host mailbox data bits */
#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004

/* host-to-device mailbox data bits */
#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002

#define BRCMF_PCIE_MBDATA_TIMEOUT		2000 /* msec */

/* PCIe config-space registers touched around a device reset */
#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
9e37f045 HM |
185 | |
/* Declare the firmware/NVRAM images so userspace tooling (e.g. initramfs
 * generators) can include them. The 4356 entries were missing even though
 * BRCMF_PCIE_4356_FW_NAME/BRCMF_PCIE_4356_NVRAM_NAME are defined above.
 */
MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
192 | ||
193 | ||
/* Host-side tracking of the firmware's in-TCM log console */
struct brcmf_pcie_console {
	u32 base_addr;		/* console struct address in TCM */
	u32 buf_addr;		/* console ring buffer address in TCM */
	u32 bufsize;		/* console ring buffer size in bytes */
	u32 read_idx;		/* host read position in the ring buffer */
	u8 log_str[256];	/* assembly buffer for one log line */
	u8 log_idx;		/* current write position within log_str */
};
202 | ||
/* Host view of the shared structure the firmware publishes in TCM,
 * plus the host DMA buffers advertised back to the firmware.
 */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;	/* TCM address of the shared structure */
	u32 flags;		/* BRCMF_PCIE_FLAGS_* / version word */
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u32 nrof_flowrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;	/* TCM address of host-to-device mbox data */
	u32 dtoh_mb_data_addr;	/* TCM address of device-to-host mbox data */
	u32 ring_info_addr;	/* TCM address of the ring-info structure */
	struct brcmf_pcie_console console;
	void *scratch;		/* host DMA scratch buffer */
	dma_addr_t scratch_dmahandle;
	void *ringupd;		/* host DMA ring-update buffer */
	dma_addr_t ringupd_dmahandle;
};
220 | ||
/* Register base and wrapper base of a backplane core */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
225 | ||
/* Per-device state for one brcmfmac PCIe device */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;		/* true while the irq thread handler runs */
	bool irq_requested;	/* request_threaded_irq() succeeded */
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	void __iomem *regs;	/* BAR0 register mapping */
	void __iomem *tcm;	/* BAR1 TCM mapping */
	u32 tcm_size;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	u32 generic_corerev;	/* selects GENREV1 vs GENREV2 handling */
	struct brcmf_pcie_shared_info shared;
	/* genrev-specific doorbell operation (ringbell_v1 or _v2) */
	void (*ringbell)(struct brcmf_pciedev_info *devinfo);
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
};
247 | ||
/* A common ring together with its DMA handle and the TCM addresses of
 * its read/write index words.
 */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;		/* TCM address of the write index */
	u32 r_idx_addr;		/* TCM address of the read index */
	struct brcmf_pciedev_info *devinfo;
	u8 id;			/* ring identifier (used in debug output) */
};
256 | ||
257 | ||
/* Maximum item count per common message ring, indexed by ring id */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};

/* Item size in bytes per common message ring, indexed by ring id */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
273 | ||
274 | ||
/* dma flushing needs implementation for mips and arm platforms. Should
 * be put in util. Note, this is not real flushing. It is virtual non
 * cached memory. Only write buffers should have to be drained. Though
 * this may be different depending on platform......
 * (Both macros are currently no-ops.)
 */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
282 | ||
283 | ||
284 | static u32 | |
285 | brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) | |
286 | { | |
287 | void __iomem *address = devinfo->regs + reg_offset; | |
288 | ||
289 | return (ioread32(address)); | |
290 | } | |
291 | ||
292 | ||
293 | static void | |
294 | brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset, | |
295 | u32 value) | |
296 | { | |
297 | void __iomem *address = devinfo->regs + reg_offset; | |
298 | ||
299 | iowrite32(value, address); | |
300 | } | |
301 | ||
302 | ||
303 | static u8 | |
304 | brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
305 | { | |
306 | void __iomem *address = devinfo->tcm + mem_offset; | |
307 | ||
308 | return (ioread8(address)); | |
309 | } | |
310 | ||
311 | ||
312 | static u16 | |
313 | brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
314 | { | |
315 | void __iomem *address = devinfo->tcm + mem_offset; | |
316 | ||
317 | return (ioread16(address)); | |
318 | } | |
319 | ||
320 | ||
321 | static void | |
322 | brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
323 | u16 value) | |
324 | { | |
325 | void __iomem *address = devinfo->tcm + mem_offset; | |
326 | ||
327 | iowrite16(value, address); | |
328 | } | |
329 | ||
330 | ||
331 | static u32 | |
332 | brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
333 | { | |
334 | void __iomem *address = devinfo->tcm + mem_offset; | |
335 | ||
336 | return (ioread32(address)); | |
337 | } | |
338 | ||
339 | ||
340 | static void | |
341 | brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
342 | u32 value) | |
343 | { | |
344 | void __iomem *address = devinfo->tcm + mem_offset; | |
345 | ||
346 | iowrite32(value, address); | |
347 | } | |
348 | ||
349 | ||
350 | static u32 | |
351 | brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
352 | { | |
353 | void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; | |
354 | ||
355 | return (ioread32(addr)); | |
356 | } | |
357 | ||
358 | ||
359 | static void | |
360 | brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
361 | u32 value) | |
362 | { | |
363 | void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; | |
364 | ||
365 | iowrite32(value, addr); | |
366 | } | |
367 | ||
368 | ||
369 | static void | |
370 | brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
371 | void *srcaddr, u32 len) | |
372 | { | |
373 | void __iomem *address = devinfo->tcm + mem_offset; | |
374 | __le32 *src32; | |
375 | __le16 *src16; | |
376 | u8 *src8; | |
377 | ||
378 | if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { | |
379 | if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { | |
380 | src8 = (u8 *)srcaddr; | |
381 | while (len) { | |
382 | iowrite8(*src8, address); | |
383 | address++; | |
384 | src8++; | |
385 | len--; | |
386 | } | |
387 | } else { | |
388 | len = len / 2; | |
389 | src16 = (__le16 *)srcaddr; | |
390 | while (len) { | |
391 | iowrite16(le16_to_cpu(*src16), address); | |
392 | address += 2; | |
393 | src16++; | |
394 | len--; | |
395 | } | |
396 | } | |
397 | } else { | |
398 | len = len / 4; | |
399 | src32 = (__le32 *)srcaddr; | |
400 | while (len) { | |
401 | iowrite32(le32_to_cpu(*src32), address); | |
402 | address += 4; | |
403 | src32++; | |
404 | len--; | |
405 | } | |
406 | } | |
407 | } | |
408 | ||
409 | ||
/* Write a chipcommon register; the chipcommon core must have been
 * selected with brcmf_pcie_select_core() beforehand.
 */
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
						CHIPCREGOFFS(reg), value)
412 | ||
413 | ||
414 | static void | |
415 | brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) | |
416 | { | |
417 | const struct pci_dev *pdev = devinfo->pdev; | |
418 | struct brcmf_core *core; | |
419 | u32 bar0_win; | |
420 | ||
421 | core = brcmf_chip_get_core(devinfo->ci, coreid); | |
422 | if (core) { | |
423 | bar0_win = core->base; | |
424 | pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win); | |
425 | if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, | |
426 | &bar0_win) == 0) { | |
427 | if (bar0_win != core->base) { | |
428 | bar0_win = core->base; | |
429 | pci_write_config_dword(pdev, | |
430 | BRCMF_PCIE_BAR0_WINDOW, | |
431 | bar0_win); | |
432 | } | |
433 | } | |
434 | } else { | |
435 | brcmf_err("Unsupported core selected %x\n", coreid); | |
436 | } | |
437 | } | |
438 | ||
439 | ||
/* Reset the device through the chipcommon watchdog while working around
 * PCIe link/config state: ASPM is disabled across the reset and a set of
 * config registers is read back and rewritten afterwards.
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	/* config registers that are read and written back after the reset */
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	/* nothing to reset before the chip object exists */
	if (!devinfo->ci)
		return;

	/* clear the ASPM enable bits, keeping the original value in lsc */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);

	/* trigger the reset via the chipcommon watchdog and let it settle */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* restore the saved link-status-control (ASPM) value */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);

	/* read back each listed config register and write the value back.
	 * NOTE(review): this write-back of the just-read value looks like a
	 * refresh/touch sequence after reset -- confirm against vendor docs.
	 */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
				       cfg_offset[i]);
		val = brcmf_pcie_read_reg32(devinfo,
					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
			  cfg_offset[i], val);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
				       val);
	}
}
488 | ||
489 | ||
490 | static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) | |
491 | { | |
492 | u32 config; | |
493 | ||
494 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
495 | if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) | |
bd4f82e3 | 496 | brcmf_pcie_reset_device(devinfo); |
9e37f045 HM |
497 | /* BAR1 window may not be sized properly */ |
498 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
499 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); | |
500 | config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA); | |
501 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config); | |
502 | ||
503 | device_wakeup_enable(&devinfo->pdev->dev); | |
504 | } | |
505 | ||
506 | ||
/* Prepare the chip for firmware download. On the 43602 the ARM CR4
 * bank registers for banks 5 and 7 are additionally cleared.
 * NOTE(review): the bank indexes 5/7 and the BANKPDA write of 0 are
 * taken as-is from vendor code -- presumably this powers up those SRAM
 * banks for the download; confirm against chip documentation.
 * Always returns 0.
 */
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
{
	brcmf_chip_enter_download(devinfo->ci);

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       5);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       7);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
	}
	return 0;
}
524 | ||
525 | ||
526 | static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, | |
527 | u32 resetintr) | |
528 | { | |
529 | struct brcmf_core *core; | |
530 | ||
531 | if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { | |
532 | core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM); | |
533 | brcmf_chip_resetcore(core, 0, 0, 0); | |
534 | } | |
535 | ||
536 | return !brcmf_chip_exit_download(devinfo->ci, resetintr); | |
537 | } | |
538 | ||
539 | ||
540 | static void | |
541 | brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) | |
542 | { | |
543 | struct brcmf_pcie_shared_info *shared; | |
544 | u32 addr; | |
545 | u32 cur_htod_mb_data; | |
546 | u32 i; | |
547 | ||
548 | shared = &devinfo->shared; | |
549 | addr = shared->htod_mb_data_addr; | |
550 | cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); | |
551 | ||
552 | if (cur_htod_mb_data != 0) | |
553 | brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n", | |
554 | cur_htod_mb_data); | |
555 | ||
556 | i = 0; | |
557 | while (cur_htod_mb_data != 0) { | |
558 | msleep(10); | |
559 | i++; | |
560 | if (i > 100) | |
561 | break; | |
562 | cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); | |
563 | } | |
564 | ||
565 | brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data); | |
566 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); | |
567 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); | |
568 | } | |
569 | ||
570 | ||
571 | static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) | |
572 | { | |
573 | struct brcmf_pcie_shared_info *shared; | |
574 | u32 addr; | |
575 | u32 dtoh_mb_data; | |
576 | ||
577 | shared = &devinfo->shared; | |
578 | addr = shared->dtoh_mb_data_addr; | |
579 | dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); | |
580 | ||
581 | if (!dtoh_mb_data) | |
582 | return; | |
583 | ||
584 | brcmf_pcie_write_tcm32(devinfo, addr, 0); | |
585 | ||
586 | brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data); | |
587 | if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) { | |
588 | brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n"); | |
589 | brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK); | |
590 | brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n"); | |
591 | } | |
592 | if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE) | |
593 | brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); | |
594 | if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) | |
595 | brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n"); | |
596 | if (waitqueue_active(&devinfo->mbdata_resp_wait)) { | |
597 | devinfo->mbdata_completed = true; | |
598 | wake_up(&devinfo->mbdata_resp_wait); | |
599 | } | |
600 | } | |
601 | ||
602 | ||
603 | static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo) | |
604 | { | |
605 | struct brcmf_pcie_shared_info *shared; | |
606 | struct brcmf_pcie_console *console; | |
607 | u32 addr; | |
608 | ||
609 | shared = &devinfo->shared; | |
610 | console = &shared->console; | |
611 | addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET; | |
612 | console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr); | |
613 | ||
614 | addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET; | |
615 | console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr); | |
616 | addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET; | |
617 | console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr); | |
618 | ||
619 | brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n", | |
620 | console->base_addr, console->buf_addr, console->bufsize); | |
621 | } | |
622 | ||
623 | ||
/* Drain new characters from the firmware console ring buffer and emit
 * complete lines through brcmf_dbg. Carriage returns are dropped, and a
 * line that would overflow the local buffer is force-terminated with a
 * newline.
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	console = &devinfo->shared.console;
	/* firmware write index; chars between read_idx and newidx are new */
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;	/* ring buffer wrap */
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* local buffer nearly full: inject a newline (one slot is
		 * kept for the terminating NUL below)
		 */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}

		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
			console->log_idx = 0;
		}
	}
}
658 | ||
659 | ||
660 | static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo) | |
661 | { | |
662 | u32 reg_value; | |
663 | ||
664 | brcmf_dbg(PCIE, "RING !\n"); | |
665 | reg_value = brcmf_pcie_read_reg32(devinfo, | |
666 | BRCMF_PCIE_PCIE2REG_MAILBOXINT); | |
667 | reg_value |= BRCMF_PCIE2_INTB; | |
668 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, | |
669 | reg_value); | |
670 | } | |
671 | ||
672 | ||
673 | static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo) | |
674 | { | |
675 | brcmf_dbg(PCIE, "RING !\n"); | |
676 | /* Any arbitrary value will do, lets use 1 */ | |
677 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); | |
678 | } | |
679 | ||
680 | ||
681 | static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) | |
682 | { | |
683 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) | |
684 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, | |
685 | 0); | |
686 | else | |
687 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, | |
688 | 0); | |
689 | } | |
690 | ||
691 | ||
692 | static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) | |
693 | { | |
694 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) | |
695 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, | |
696 | BRCMF_PCIE_INT_DEF); | |
697 | else | |
698 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, | |
699 | BRCMF_PCIE_MB_INT_D2H_DB | | |
700 | BRCMF_PCIE_MB_INT_FN0_0 | | |
701 | BRCMF_PCIE_MB_INT_FN0_1); | |
702 | } | |
703 | ||
704 | ||
705 | static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg) | |
706 | { | |
707 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
708 | u32 status; | |
709 | ||
710 | status = 0; | |
711 | pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status); | |
712 | if (status) { | |
713 | brcmf_pcie_intr_disable(devinfo); | |
714 | brcmf_dbg(PCIE, "Enter\n"); | |
715 | return IRQ_WAKE_THREAD; | |
716 | } | |
717 | return IRQ_NONE; | |
718 | } | |
719 | ||
720 | ||
721 | static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg) | |
722 | { | |
723 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
724 | ||
725 | if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) { | |
726 | brcmf_pcie_intr_disable(devinfo); | |
727 | brcmf_dbg(PCIE, "Enter\n"); | |
728 | return IRQ_WAKE_THREAD; | |
729 | } | |
730 | return IRQ_NONE; | |
731 | } | |
732 | ||
733 | ||
734 | static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg) | |
735 | { | |
736 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
737 | const struct pci_dev *pdev = devinfo->pdev; | |
738 | u32 status; | |
739 | ||
740 | devinfo->in_irq = true; | |
741 | status = 0; | |
742 | pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status); | |
743 | brcmf_dbg(PCIE, "Enter %x\n", status); | |
744 | if (status) { | |
745 | pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status); | |
746 | if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) | |
747 | brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev); | |
748 | } | |
749 | if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) | |
750 | brcmf_pcie_intr_enable(devinfo); | |
751 | devinfo->in_irq = false; | |
752 | return IRQ_HANDLED; | |
753 | } | |
754 | ||
755 | ||
756 | static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg) | |
757 | { | |
758 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
759 | u32 status; | |
760 | ||
761 | devinfo->in_irq = true; | |
762 | status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); | |
763 | brcmf_dbg(PCIE, "Enter %x\n", status); | |
764 | if (status) { | |
765 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, | |
766 | status); | |
767 | if (status & (BRCMF_PCIE_MB_INT_FN0_0 | | |
768 | BRCMF_PCIE_MB_INT_FN0_1)) | |
769 | brcmf_pcie_handle_mb_data(devinfo); | |
770 | if (status & BRCMF_PCIE_MB_INT_D2H_DB) { | |
771 | if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) | |
772 | brcmf_proto_msgbuf_rx_trigger( | |
773 | &devinfo->pdev->dev); | |
774 | } | |
775 | } | |
776 | brcmf_pcie_bus_console_read(devinfo); | |
777 | if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) | |
778 | brcmf_pcie_intr_enable(devinfo); | |
779 | devinfo->in_irq = false; | |
780 | return IRQ_HANDLED; | |
781 | } | |
782 | ||
783 | ||
784 | static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) | |
785 | { | |
786 | struct pci_dev *pdev; | |
787 | ||
788 | pdev = devinfo->pdev; | |
789 | ||
790 | brcmf_pcie_intr_disable(devinfo); | |
791 | ||
792 | brcmf_dbg(PCIE, "Enter\n"); | |
793 | /* is it a v1 or v2 implementation */ | |
794 | devinfo->irq_requested = false; | |
795 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { | |
796 | if (request_threaded_irq(pdev->irq, | |
797 | brcmf_pcie_quick_check_isr_v1, | |
798 | brcmf_pcie_isr_thread_v1, | |
799 | IRQF_SHARED, "brcmf_pcie_intr", | |
800 | devinfo)) { | |
801 | brcmf_err("Failed to request IRQ %d\n", pdev->irq); | |
802 | return -EIO; | |
803 | } | |
804 | } else { | |
805 | if (request_threaded_irq(pdev->irq, | |
806 | brcmf_pcie_quick_check_isr_v2, | |
807 | brcmf_pcie_isr_thread_v2, | |
808 | IRQF_SHARED, "brcmf_pcie_intr", | |
809 | devinfo)) { | |
810 | brcmf_err("Failed to request IRQ %d\n", pdev->irq); | |
811 | return -EIO; | |
812 | } | |
813 | } | |
814 | devinfo->irq_requested = true; | |
815 | devinfo->irq_allocated = true; | |
816 | return 0; | |
817 | } | |
818 | ||
819 | ||
/* Tear down the interrupt: mask it, free the handler, wait for any
 * in-flight threaded handler to finish, then clear whatever interrupt
 * status is still pending.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	if (!devinfo->irq_requested)
		return;
	devinfo->irq_requested = false;
	free_irq(pdev->irq, devinfo);

	/* give a still-running irq thread up to ~1 second to complete */
	msleep(50);
	count = 0;
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* acknowledge any interrupt status left pending */
	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
		status = 0;
		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
	} else {
		status = brcmf_pcie_read_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
	}
	devinfo->irq_allocated = false;
}
858 | ||
859 | ||
860 | static int brcmf_pcie_ring_mb_write_rptr(void *ctx) | |
861 | { | |
862 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
863 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
864 | struct brcmf_commonring *commonring = &ring->commonring; | |
865 | ||
866 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
867 | return -EIO; | |
868 | ||
869 | brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, | |
870 | commonring->w_ptr, ring->id); | |
871 | ||
872 | brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr); | |
873 | ||
874 | return 0; | |
875 | } | |
876 | ||
877 | ||
878 | static int brcmf_pcie_ring_mb_write_wptr(void *ctx) | |
879 | { | |
880 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
881 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
882 | struct brcmf_commonring *commonring = &ring->commonring; | |
883 | ||
884 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
885 | return -EIO; | |
886 | ||
887 | brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, | |
888 | commonring->r_ptr, ring->id); | |
889 | ||
890 | brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr); | |
891 | ||
892 | return 0; | |
893 | } | |
894 | ||
895 | ||
896 | static int brcmf_pcie_ring_mb_ring_bell(void *ctx) | |
897 | { | |
898 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
899 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
900 | ||
901 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
902 | return -EIO; | |
903 | ||
904 | devinfo->ringbell(devinfo); | |
905 | ||
906 | return 0; | |
907 | } | |
908 | ||
909 | ||
910 | static int brcmf_pcie_ring_mb_update_rptr(void *ctx) | |
911 | { | |
912 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
913 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
914 | struct brcmf_commonring *commonring = &ring->commonring; | |
915 | ||
916 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
917 | return -EIO; | |
918 | ||
919 | commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr); | |
920 | ||
921 | brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, | |
922 | commonring->w_ptr, ring->id); | |
923 | ||
924 | return 0; | |
925 | } | |
926 | ||
927 | ||
928 | static int brcmf_pcie_ring_mb_update_wptr(void *ctx) | |
929 | { | |
930 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
931 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
932 | struct brcmf_commonring *commonring = &ring->commonring; | |
933 | ||
934 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
935 | return -EIO; | |
936 | ||
937 | commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr); | |
938 | ||
939 | brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, | |
940 | commonring->r_ptr, ring->id); | |
941 | ||
942 | return 0; | |
943 | } | |
944 | ||
945 | ||
946 | static void * | |
947 | brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo, | |
948 | u32 size, u32 tcm_dma_phys_addr, | |
949 | dma_addr_t *dma_handle) | |
950 | { | |
951 | void *ring; | |
952 | long long address; | |
953 | ||
954 | ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, | |
955 | GFP_KERNEL); | |
956 | if (!ring) | |
957 | return NULL; | |
958 | ||
959 | address = (long long)(long)*dma_handle; | |
960 | brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, | |
961 | address & 0xffffffff); | |
962 | brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); | |
963 | ||
964 | memset(ring, 0, size); | |
965 | ||
966 | return (ring); | |
967 | } | |
968 | ||
969 | ||
/* Allocate the DMA backing store for common ring @ring_id, publish its
 * geometry (max item count and per-item size) into the device-side ring
 * descriptor at @tcm_ring_phys_addr, and wrap everything in a
 * brcmf_pcie_ringbuf with the mailbox index/doorbell callbacks
 * registered.  Returns NULL on any allocation failure (DMA buffer is
 * released again if only the ringbuf allocation fails).
 */
static struct brcmf_pcie_ringbuf *
brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
			      u32 tcm_ring_phys_addr)
{
	void *dma_buf;
	dma_addr_t dma_handle;
	struct brcmf_pcie_ringbuf *ring;
	u32 size;
	u32 addr;

	/* Total ring size = item count * item size (per-ring constants). */
	size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
			&dma_handle);
	if (!dma_buf)
		return NULL;

	/* Tell the device the ring geometry via its TCM descriptor. */
	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
				  dma_handle);
		return NULL;
	}
	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
				brcmf_ring_itemsize[ring_id], dma_buf);
	ring->dma_handle = dma_handle;
	ring->devinfo = devinfo;
	brcmf_commonring_register_cb(&ring->commonring,
				     brcmf_pcie_ring_mb_ring_bell,
				     brcmf_pcie_ring_mb_update_rptr,
				     brcmf_pcie_ring_mb_update_wptr,
				     brcmf_pcie_ring_mb_write_rptr,
				     brcmf_pcie_ring_mb_write_wptr, ring);

	return (ring);
}
1011 | ||
1012 | ||
1013 | static void brcmf_pcie_release_ringbuffer(struct device *dev, | |
1014 | struct brcmf_pcie_ringbuf *ring) | |
1015 | { | |
1016 | void *dma_buf; | |
1017 | u32 size; | |
1018 | ||
1019 | if (!ring) | |
1020 | return; | |
1021 | ||
1022 | dma_buf = ring->commonring.buf_addr; | |
1023 | if (dma_buf) { | |
1024 | size = ring->commonring.depth * ring->commonring.item_len; | |
1025 | dma_free_coherent(dev, size, dma_buf, ring->dma_handle); | |
1026 | } | |
1027 | kfree(ring); | |
1028 | } | |
1029 | ||
1030 | ||
1031 | static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) | |
1032 | { | |
1033 | u32 i; | |
1034 | ||
1035 | for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) { | |
1036 | brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev, | |
1037 | devinfo->shared.commonrings[i]); | |
1038 | devinfo->shared.commonrings[i] = NULL; | |
1039 | } | |
1040 | kfree(devinfo->shared.flowrings); | |
1041 | devinfo->shared.flowrings = NULL; | |
1042 | } | |
1043 | ||
1044 | ||
/* Set up all message rings shared with the device.  Reads the ring-info
 * block from TCM to learn where the per-ring read/write index arrays and
 * ring memory descriptors live, allocates DMA memory for each common
 * (H2D and D2H) ring, then allocates the (non-DMA) flow ring bookkeeping
 * array sized from the device-reported submission queue count.
 * Returns 0 on success, -ENOMEM after releasing anything allocated.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u16 max_sub_queues;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);

	/* Device-resident index arrays for both transfer directions. */
	addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
	d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
	d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
	h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
	h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* Base of the per-ring memory descriptors in TCM. */
	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* Host-to-device common rings: each consumes one u32 slot in the
	 * H2D index arrays and one descriptor of BRCMF_RING_MEM_SZ bytes.
	 */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* Device-to-host common rings, continuing the descriptor walk. */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += sizeof(u32);
		d2h_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* Flow rings: number reported by the device; bookkeeping only,
	 * DMA memory is allocated later when a flow ring is created.
	 */
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating commonring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
1140 | ||
1141 | ||
1142 | static void | |
1143 | brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo) | |
1144 | { | |
1145 | if (devinfo->shared.scratch) | |
1146 | dma_free_coherent(&devinfo->pdev->dev, | |
1147 | BRCMF_DMA_D2H_SCRATCH_BUF_LEN, | |
1148 | devinfo->shared.scratch, | |
1149 | devinfo->shared.scratch_dmahandle); | |
1150 | if (devinfo->shared.ringupd) | |
1151 | dma_free_coherent(&devinfo->pdev->dev, | |
1152 | BRCMF_DMA_D2H_RINGUPD_BUF_LEN, | |
1153 | devinfo->shared.ringupd, | |
1154 | devinfo->shared.ringupd_dmahandle); | |
1155 | } | |
1156 | ||
1157 | static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) | |
1158 | { | |
1159 | long long address; | |
1160 | u32 addr; | |
1161 | ||
1162 | devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev, | |
1163 | BRCMF_DMA_D2H_SCRATCH_BUF_LEN, | |
1164 | &devinfo->shared.scratch_dmahandle, GFP_KERNEL); | |
1165 | if (!devinfo->shared.scratch) | |
1166 | goto fail; | |
1167 | ||
1168 | memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); | |
1169 | brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); | |
1170 | ||
1171 | addr = devinfo->shared.tcm_base_address + | |
1172 | BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; | |
1173 | address = (long long)(long)devinfo->shared.scratch_dmahandle; | |
1174 | brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); | |
1175 | brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); | |
1176 | addr = devinfo->shared.tcm_base_address + | |
1177 | BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET; | |
1178 | brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); | |
1179 | ||
1180 | devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev, | |
1181 | BRCMF_DMA_D2H_RINGUPD_BUF_LEN, | |
1182 | &devinfo->shared.ringupd_dmahandle, GFP_KERNEL); | |
1183 | if (!devinfo->shared.ringupd) | |
1184 | goto fail; | |
1185 | ||
1186 | memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); | |
1187 | brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); | |
1188 | ||
1189 | addr = devinfo->shared.tcm_base_address + | |
1190 | BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; | |
1191 | address = (long long)(long)devinfo->shared.ringupd_dmahandle; | |
1192 | brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); | |
1193 | brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); | |
1194 | addr = devinfo->shared.tcm_base_address + | |
1195 | BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET; | |
1196 | brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); | |
1197 | return 0; | |
1198 | ||
1199 | fail: | |
1200 | brcmf_err("Allocating scratch buffers failed\n"); | |
1201 | brcmf_pcie_release_scratchbuffers(devinfo); | |
1202 | return -ENOMEM; | |
1203 | } | |
1204 | ||
1205 | ||
/* Bus "stop" callback: intentionally empty placeholder for this bus. */
static void brcmf_pcie_down(struct device *dev)
{
}
1209 | ||
1210 | ||
/* Bus "txdata" callback: stub.  NOTE(review): reports success without
 * transmitting or freeing @skb -- the msgbuf data path presumably takes
 * over; confirm callers never reach this.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
1215 | ||
1216 | ||
/* Bus "txctl" callback: stub that claims success without sending
 * anything; control traffic is expected to flow via msgbuf instead.
 */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
1222 | ||
1223 | ||
/* Bus "rxctl" callback: stub that returns success without producing any
 * data; control traffic is expected to flow via msgbuf instead.
 */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
1229 | ||
1230 | ||
/* Bus operations handed to the common brcmfmac layer; the tx/rx entries
 * are currently stubs (see above).
 */
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
};
1237 | ||
1238 | ||
/* Parse the shared-RAM info block the firmware placed at @sharedram_addr:
 * validate the protocol version and feature flags, then cache the
 * rx-buffer-post limit, rx data offset, mailbox addresses and ring-info
 * address for later use.  Returns 0 on success, -EINVAL on an
 * unsupported protocol version or TX mode.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	/* First word holds flags; low bits encode the protocol version. */
	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}
	if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
		brcmf_err("Unsupported legacy TX mode 0x%x\n",
			  shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
		return -EINVAL;
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	/* Zero means the firmware did not specify a limit; use default. */
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
1288 | ||
1289 | ||
/* Build the firmware and NVRAM file names for the detected chip into
 * devinfo->fw_name / devinfo->nvram_name, honoring the optional
 * brcmf_firmware_path module-parameter prefix.  Returns 0 on success or
 * -ENODEV for an unrecognized chip id.
 */
static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
{
	char *fw_name;
	char *nvram_name;
	uint fw_len, nv_len;
	char end;

	brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
		  devinfo->ci->chiprev);

	/* Pick the base file names for the chip family. */
	switch (devinfo->ci->chip) {
	case BRCM_CC_43602_CHIP_ID:
		fw_name = BRCMF_PCIE_43602_FW_NAME;
		nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
		break;
	case BRCM_CC_4354_CHIP_ID:
		fw_name = BRCMF_PCIE_4354_FW_NAME;
		nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
		break;
	case BRCM_CC_4356_CHIP_ID:
		fw_name = BRCMF_PCIE_4356_FW_NAME;
		nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
		break;
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
		fw_name = BRCMF_PCIE_43570_FW_NAME;
		nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
		break;
	default:
		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
		return -ENODEV;
	}

	/* Reserve one byte for the NUL terminator in each buffer.
	 * NOTE(review): NUL termination after strncpy relies on devinfo
	 * being zero-initialized (kzalloc in probe) -- confirm no caller
	 * reuses a dirty devinfo.
	 */
	fw_len = sizeof(devinfo->fw_name) - 1;
	nv_len = sizeof(devinfo->nvram_name) - 1;
	/* check if firmware path is provided by module parameter */
	if (brcmf_firmware_path[0] != '\0') {
		strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
		strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
		/* Track the space remaining for the appended names. */
		fw_len -= strlen(devinfo->fw_name);
		nv_len -= strlen(devinfo->nvram_name);

		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
		if (end != '/') {
			strncat(devinfo->fw_name, "/", fw_len);
			strncat(devinfo->nvram_name, "/", nv_len);
			fw_len--;
			nv_len--;
		}
	}
	strncat(devinfo->fw_name, fw_name, fw_len);
	strncat(devinfo->nvram_name, nvram_name, nv_len);

	return 0;
}
1346 | ||
1347 | ||
/* Download firmware (and optional NVRAM) into device RAM, restart the
 * ARM core, and wait for the firmware to publish its shared-RAM address
 * in the last word of RAM.  Consumes @fw (released here) and @nvram
 * (freed here when non-NULL).  Returns 0 after the shared-RAM info has
 * been parsed, or a negative errno.
 */
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	/* Only generation-2 cores reach this path. */
	devinfo->ringbell = brcmf_pcie_ringbell_v2;
	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	/* NOTE(review): the (void *) cast drops const from fw->data; the
	 * helper presumably only reads it -- verify.
	 */
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	/* First firmware word is the reset vector, needed to restart ARM. */
	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* reset last 4 bytes of RAM address. to be used for shared
	 * area. This identifies when FW is running
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		/* NVRAM lives at the very top of device RAM. */
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	/* Poll (50ms steps, bounded by BRCMF_PCIE_FW_UP_TIMEOUT) until the
	 * firmware overwrites the sentinel with its shared-RAM address.
	 */
	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
1416 | ||
1417 | ||
/* Enable the PCI device, become bus master, and map BAR0 (register
 * space) and BAR2 (TCM memory) into the kernel.  Returns 0 on success.
 * NOTE(review): error paths do not undo pci_enable_device()/partial
 * ioremaps here -- cleanup relies on the caller invoking
 * brcmf_pcie_release_resource(); confirm all callers do.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}
	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
		  devinfo->tcm, (unsigned long long)bar1_addr);

	return 0;
}
1463 | ||
1464 | ||
1465 | static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) | |
1466 | { | |
1467 | if (devinfo->tcm) | |
1468 | iounmap(devinfo->tcm); | |
1469 | if (devinfo->regs) | |
1470 | iounmap(devinfo->regs); | |
1471 | ||
1472 | pci_disable_device(devinfo->pdev); | |
1473 | } | |
1474 | ||
1475 | ||
/* Hook this bus into the common brcmfmac layer and bring it up.
 * Returns 0 on success or the first failing step's error code.
 */
static int brcmf_pcie_attach_bus(struct device *dev)
{
	int err;

	/* Attach to the common driver interface */
	err = brcmf_attach(dev);
	if (err) {
		brcmf_err("brcmf_attach failed\n");
		return err;
	}

	err = brcmf_bus_start(dev);
	if (err)
		brcmf_err("dongle is not responding\n");

	return err;
}
1492 | ||
1493 | ||
1494 | static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) | |
1495 | { | |
1496 | u32 ret_addr; | |
1497 | ||
1498 | ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1); | |
1499 | addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1); | |
1500 | pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr); | |
1501 | ||
1502 | return ret_addr; | |
1503 | } | |
1504 | ||
1505 | ||
1506 | static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr) | |
1507 | { | |
1508 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1509 | ||
1510 | addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); | |
1511 | return brcmf_pcie_read_reg32(devinfo, addr); | |
1512 | } | |
1513 | ||
1514 | ||
1515 | static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value) | |
1516 | { | |
1517 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1518 | ||
1519 | addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); | |
1520 | brcmf_pcie_write_reg32(devinfo, addr, value); | |
1521 | } | |
1522 | ||
1523 | ||
/* Buscore "prepare" callback: claim the PCI resources, then kick the
 * chipcommon watchdog so every core on the chip is reset and the dongle
 * starts from a known state.
 */
static int brcmf_pcie_buscoreprep(void *ctx)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
	int err;

	err = brcmf_pcie_get_resource(devinfo);
	if (err == 0) {
		/* Set CC watchdog to reset all the cores on the chip to bring
		 * back dongle to a sane state.
		 */
		brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE,
							    watchdog), 4);
		/* Allow the watchdog reset to complete before continuing. */
		msleep(100);
	}

	return err;
}
1541 | ||
1542 | ||
1543 | static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip, | |
1544 | u32 rstvec) | |
1545 | { | |
1546 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1547 | ||
1548 | brcmf_pcie_write_tcm32(devinfo, 0, rstvec); | |
1549 | } | |
1550 | ||
1551 | ||
/* Chip/buscore access callbacks handed to brcmf_chip_attach(). */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.exit_dl = brcmf_pcie_buscore_exitdl,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
1558 | ||
1559 | static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, | |
1560 | void *nvram, u32 nvram_len) | |
1561 | { | |
1562 | struct brcmf_bus *bus = dev_get_drvdata(dev); | |
1563 | struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; | |
1564 | struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; | |
1565 | struct brcmf_commonring **flowrings; | |
1566 | int ret; | |
1567 | u32 i; | |
1568 | ||
1569 | brcmf_pcie_attach(devinfo); | |
1570 | ||
1571 | ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); | |
1572 | if (ret) | |
1573 | goto fail; | |
1574 | ||
1575 | devinfo->state = BRCMFMAC_PCIE_STATE_UP; | |
1576 | ||
1577 | ret = brcmf_pcie_init_ringbuffers(devinfo); | |
1578 | if (ret) | |
1579 | goto fail; | |
1580 | ||
1581 | ret = brcmf_pcie_init_scratchbuffers(devinfo); | |
1582 | if (ret) | |
1583 | goto fail; | |
1584 | ||
1585 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
1586 | ret = brcmf_pcie_request_irq(devinfo); | |
1587 | if (ret) | |
1588 | goto fail; | |
1589 | ||
1590 | /* hook the commonrings in the bus structure. */ | |
1591 | for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) | |
1592 | bus->msgbuf->commonrings[i] = | |
1593 | &devinfo->shared.commonrings[i]->commonring; | |
1594 | ||
1595 | flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings), | |
1596 | GFP_KERNEL); | |
1597 | if (!flowrings) | |
1598 | goto fail; | |
1599 | ||
1600 | for (i = 0; i < devinfo->shared.nrof_flowrings; i++) | |
1601 | flowrings[i] = &devinfo->shared.flowrings[i].commonring; | |
1602 | bus->msgbuf->flowrings = flowrings; | |
1603 | ||
1604 | bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; | |
1605 | bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; | |
1606 | bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings; | |
1607 | ||
1608 | init_waitqueue_head(&devinfo->mbdata_resp_wait); | |
1609 | ||
1610 | brcmf_pcie_intr_enable(devinfo); | |
1611 | if (brcmf_pcie_attach_bus(bus->dev) == 0) | |
1612 | return; | |
1613 | ||
1614 | brcmf_pcie_bus_console_read(devinfo); | |
1615 | ||
1616 | fail: | |
1617 | device_release_driver(dev); | |
1618 | } | |
1619 | ||
/* PCI probe: allocate the per-device state, attach the chip (which maps
 * the PCI resources via the buscore prepare callback), build the bus and
 * msgbuf structures, and kick off the asynchronous firmware request that
 * completes in brcmf_pcie_setup().  Returns 0 once the firmware request
 * is in flight, or a negative errno after unwinding.
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	/* Attaching the chip also runs brcmf_pcie_buscoreprep(), which
	 * enables the PCI device and maps BAR0/BAR2.
	 */
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_pcie_get_fwnames(devinfo);
	if (ret)
		goto fail_bus;

	/* Firmware load is asynchronous; brcmf_pcie_setup() finishes the
	 * bring-up when the files arrive.
	 */
	ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
					       BRCMF_FW_REQ_NV_OPTIONAL,
				     devinfo->fw_name, devinfo->nvram_name,
				     brcmf_pcie_setup);
	if (ret == 0)
		return 0;
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	/* NOTE(review): reached even when chip attach failed before the
	 * device was enabled/mapped -- confirm release_resource tolerates
	 * that (iounmap is guarded; pci_disable_device is not).
	 */
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
1694 | ||
1695 | ||
/* PCI remove: detach from the common layer, free the bus structures,
 * then tear down IRQ, DMA buffers, rings, chip state and PCI resources
 * in reverse order of setup.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	/* Mark the bus down first so ring callbacks start failing fast. */
	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
1733 | ||
1734 | ||
1735 | #ifdef CONFIG_PM | |
1736 | ||
1737 | ||
/* PM suspend: ask the firmware to enter the D3 substate, wait for its
 * acknowledgement, then release the IRQ, save PCI state, drop the chip
 * handle and fully remove the device before putting it to sleep.  The
 * matching resume re-probes from scratch.
 */
static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);

	bus = dev_get_drvdata(&pdev->dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	/* Hand-shake with firmware: request D3 and wait for the mailbox
	 * response (signalled via mbdata_resp_wait).
	 */
	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait,
			   devinfo->mbdata_completed,
			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		return -EIO;
	}
	brcmf_pcie_release_irq(devinfo);

	err = pci_save_state(pdev);
	if (err) {
		brcmf_err("pci_save_state failed, err=%d\n", err);
		return err;
	}

	/* Drop the chip handle before remove so remove() skips the
	 * chip-dependent teardown steps.
	 */
	brcmf_chip_detach(devinfo->ci);
	devinfo->ci = NULL;

	brcmf_pcie_remove(pdev);

	return pci_prepare_to_sleep(pdev);
}
1776 | ||
1777 | ||
1778 | static int brcmf_pcie_resume(struct pci_dev *pdev) | |
1779 | { | |
1780 | int err; | |
1781 | ||
1782 | brcmf_dbg(PCIE, "Enter, pdev=%p\n", pdev); | |
1783 | ||
1784 | err = pci_set_power_state(pdev, PCI_D0); | |
1785 | if (err) { | |
1786 | brcmf_err("pci_set_power_state failed, err=%d\n", err); | |
1787 | return err; | |
1788 | } | |
1789 | pci_restore_state(pdev); | |
1790 | ||
1791 | err = brcmf_pcie_probe(pdev, NULL); | |
1792 | if (err) | |
1793 | brcmf_err("probe after resume failed, err=%d\n", err); | |
1794 | ||
1795 | return err; | |
1796 | } | |
1797 | ||
1798 | ||
1799 | #endif /* CONFIG_PM */ | |
1800 | ||
1801 | ||
1802 | #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ | |
1803 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } | |
1804 | ||
1805 | static struct pci_device_id brcmf_pcie_devid_table[] = { | |
1806 | BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID), | |
1807 | BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), | |
1808 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), | |
1809 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), | |
1810 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID), | |
1811 | { /* end: all zeroes */ } | |
1812 | }; | |
1813 | ||
1814 | ||
1815 | MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table); | |
1816 | ||
1817 | ||
1818 | static struct pci_driver brcmf_pciedrvr = { | |
1819 | .node = {}, | |
1820 | .name = KBUILD_MODNAME, | |
1821 | .id_table = brcmf_pcie_devid_table, | |
1822 | .probe = brcmf_pcie_probe, | |
1823 | .remove = brcmf_pcie_remove, | |
1824 | #ifdef CONFIG_PM | |
1825 | .suspend = brcmf_pcie_suspend, | |
1826 | .resume = brcmf_pcie_resume | |
1827 | #endif /* CONFIG_PM */ | |
1828 | }; | |
1829 | ||
1830 | ||
1831 | void brcmf_pcie_register(void) | |
1832 | { | |
1833 | int err; | |
1834 | ||
1835 | brcmf_dbg(PCIE, "Enter\n"); | |
1836 | err = pci_register_driver(&brcmf_pciedrvr); | |
1837 | if (err) | |
1838 | brcmf_err("PCIE driver registration failed, err=%d\n", err); | |
1839 | } | |
1840 | ||
1841 | ||
/* Unregister the PCIe bus driver; the PCI core invokes the remove hook for
 * every bound device as part of pci_unregister_driver().
 */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}