Commit | Line | Data |
---|---|---|
9e37f045 HM |
1 | /* Copyright (c) 2014 Broadcom Corporation |
2 | * | |
3 | * Permission to use, copy, modify, and/or distribute this software for any | |
4 | * purpose with or without fee is hereby granted, provided that the above | |
5 | * copyright notice and this permission notice appear in all copies. | |
6 | * | |
7 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
8 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
9 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
10 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
11 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION | |
12 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | |
13 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
14 | */ | |
15 | ||
16 | #include <linux/kernel.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/firmware.h> | |
19 | #include <linux/pci.h> | |
20 | #include <linux/vmalloc.h> | |
21 | #include <linux/delay.h> | |
9e37f045 HM |
22 | #include <linux/interrupt.h> |
23 | #include <linux/bcma/bcma.h> | |
24 | #include <linux/sched.h> | |
a1d69c60 | 25 | #include <asm/unaligned.h> |
9e37f045 HM |
26 | |
27 | #include <soc.h> | |
28 | #include <chipcommon.h> | |
29 | #include <brcmu_utils.h> | |
30 | #include <brcmu_wifi.h> | |
31 | #include <brcm_hw_ids.h> | |
32 | ||
a8e8ed34 | 33 | #include "debug.h" |
d14f78b9 | 34 | #include "bus.h" |
9e37f045 HM |
35 | #include "commonring.h" |
36 | #include "msgbuf.h" | |
37 | #include "pcie.h" | |
38 | #include "firmware.h" | |
39 | #include "chip.h" | |
40 | ||
41 | ||
/* Overall bus state; UP is set once the device is operational
 * (checked by the IRQ threads and ring callbacks below).
 */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
46 | ||
47 | ||
/* Firmware and NVRAM image names per supported chip. */
#define BRCMF_PCIE_43602_FW_NAME		"brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME		"brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME			"brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"

#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

/* Sizes of the two BAR mappings held in devinfo->tcm / devinfo->regs. */
#define BRCMF_PCIE_TCM_MAP_SIZE			(4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
/* NOTE(review): "BARO" looks like a typo for "BAR0", but the identifier
 * may be referenced elsewhere in the file, so it is kept as-is.
 */
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000

/* ARM CR4 core registers used during firmware download. */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

/* Gen-1 interrupt registers live in PCI config space. */
#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

/* Gen-2 (PCIE2 core) register offsets, accessed via BAR0. */
#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX		0x140

#define BRCMF_PCIE_GENREV1			1
#define BRCMF_PCIE_GENREV2			2

#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

/* Mailbox interrupt bits: function 0 events and D2H doorbells. */
#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

/* Host/firmware shared-structure protocol version bounds and flags. */
#define BRCMF_PCIE_MIN_SHARED_VERSION		4
#define BRCMF_PCIE_MAX_SHARED_VERSION		5
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT	0x4000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000

/* Byte offsets into the shared structure in device TCM. */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

/* Byte offsets within the ring-info structure. */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

/* Byte offsets within a single ring-memory descriptor. */
#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET	4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET	8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET	12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET	16
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET	0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES	52

#define BRCMF_DEF_MAX_RXBUFPOST			255

/* Offsets into the firmware console structure (see console_init/read). */
#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

/* Device-to-host mailbox data bits. */
#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004

/* Host-to-device mailbox data bits. */
#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
#define BRCMF_H2D_HOST_D0_INFORM		0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT		2000

/* PCIe config registers saved/restored across a device reset. */
#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3

9e37f045 HM |
185 | |
/* Declare the firmware files so userspace tooling can bundle them. */
MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
192 | ||
193 | ||
/* Host-side view of the firmware's in-device log console
 * (see brcmf_pcie_bus_console_init/read).
 */
struct brcmf_pcie_console {
	u32 base_addr;		/* console struct address in device TCM */
	u32 buf_addr;		/* circular log buffer address in TCM */
	u32 bufsize;		/* log buffer size in bytes */
	u32 read_idx;		/* host read position in the log buffer */
	u8 log_str[256];	/* accumulates one log line for printing */
	u8 log_idx;		/* write position within log_str */
};
202 | ||
/* Host copy of the structure shared with firmware through device TCM. */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;	/* base of the shared struct in TCM */
	u32 flags;		/* BRCMF_PCIE_FLAGS_* / version bits */
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u32 nrof_flowrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;	/* TCM addr of host-to-device mailbox word */
	u32 dtoh_mb_data_addr;	/* TCM addr of device-to-host mailbox word */
	u32 ring_info_addr;
	struct brcmf_pcie_console console;
	void *scratch;		/* DMA scratch buffer (host virtual) */
	dma_addr_t scratch_dmahandle;
	void *ringupd;		/* DMA ring-update buffer (host virtual) */
	dma_addr_t ringupd_dmahandle;
};
220 | ||
/* Backplane addresses of a chip core: register base and wrapper base. */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
225 | ||
/* Per-device state for one brcmfmac PCIe device. */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;		/* set while the threaded ISR is running */
	bool irq_requested;	/* request_threaded_irq() succeeded */
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	void __iomem *regs;	/* BAR0: register window */
	void __iomem *tcm;	/* BAR1: device tightly-coupled memory */
	u32 tcm_size;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	u32 generic_corerev;	/* selects GENREV1 vs GENREV2 register model */
	struct brcmf_pcie_shared_info shared;
	/* doorbell implementation, v1 or v2 depending on core revision */
	void (*ringbell)(struct brcmf_pciedev_info *devinfo);
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
};
248 | ||
/* One message ring: generic commonring state plus the PCIe-specific
 * DMA handle and the TCM addresses of the read/write index words.
 */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;		/* TCM address of the write index */
	u32 r_idx_addr;		/* TCM address of the read index */
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};
257 | ||
258 | ||
/* Per-commonring maximum item counts, indexed by ring id. */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};

/* Per-commonring item sizes in bytes, same index order as above. */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
274 | ||
275 | ||
/* dma flushing needs implementation for mips and arm platforms. Should
 * be put in util. Note, this is not real flushing. It is virtual non
 * cached memory. Only write buffers should have to be drained. Though
 * this may be different depending on platform......
 * (Both macros are intentionally no-ops on the platforms built today.)
 */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
283 | ||
284 | ||
285 | static u32 | |
286 | brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) | |
287 | { | |
288 | void __iomem *address = devinfo->regs + reg_offset; | |
289 | ||
290 | return (ioread32(address)); | |
291 | } | |
292 | ||
293 | ||
294 | static void | |
295 | brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset, | |
296 | u32 value) | |
297 | { | |
298 | void __iomem *address = devinfo->regs + reg_offset; | |
299 | ||
300 | iowrite32(value, address); | |
301 | } | |
302 | ||
303 | ||
304 | static u8 | |
305 | brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
306 | { | |
307 | void __iomem *address = devinfo->tcm + mem_offset; | |
308 | ||
309 | return (ioread8(address)); | |
310 | } | |
311 | ||
312 | ||
313 | static u16 | |
314 | brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
315 | { | |
316 | void __iomem *address = devinfo->tcm + mem_offset; | |
317 | ||
318 | return (ioread16(address)); | |
319 | } | |
320 | ||
321 | ||
322 | static void | |
323 | brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
324 | u16 value) | |
325 | { | |
326 | void __iomem *address = devinfo->tcm + mem_offset; | |
327 | ||
328 | iowrite16(value, address); | |
329 | } | |
330 | ||
331 | ||
332 | static u32 | |
333 | brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
334 | { | |
335 | void __iomem *address = devinfo->tcm + mem_offset; | |
336 | ||
337 | return (ioread32(address)); | |
338 | } | |
339 | ||
340 | ||
341 | static void | |
342 | brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
343 | u32 value) | |
344 | { | |
345 | void __iomem *address = devinfo->tcm + mem_offset; | |
346 | ||
347 | iowrite32(value, address); | |
348 | } | |
349 | ||
350 | ||
351 | static u32 | |
352 | brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset) | |
353 | { | |
354 | void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; | |
355 | ||
356 | return (ioread32(addr)); | |
357 | } | |
358 | ||
359 | ||
360 | static void | |
361 | brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
362 | u32 value) | |
363 | { | |
364 | void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset; | |
365 | ||
366 | iowrite32(value, addr); | |
367 | } | |
368 | ||
369 | ||
370 | static void | |
371 | brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, | |
372 | void *srcaddr, u32 len) | |
373 | { | |
374 | void __iomem *address = devinfo->tcm + mem_offset; | |
375 | __le32 *src32; | |
376 | __le16 *src16; | |
377 | u8 *src8; | |
378 | ||
379 | if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { | |
380 | if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { | |
381 | src8 = (u8 *)srcaddr; | |
382 | while (len) { | |
383 | iowrite8(*src8, address); | |
384 | address++; | |
385 | src8++; | |
386 | len--; | |
387 | } | |
388 | } else { | |
389 | len = len / 2; | |
390 | src16 = (__le16 *)srcaddr; | |
391 | while (len) { | |
392 | iowrite16(le16_to_cpu(*src16), address); | |
393 | address += 2; | |
394 | src16++; | |
395 | len--; | |
396 | } | |
397 | } | |
398 | } else { | |
399 | len = len / 4; | |
400 | src32 = (__le32 *)srcaddr; | |
401 | while (len) { | |
402 | iowrite32(le32_to_cpu(*src32), address); | |
403 | address += 4; | |
404 | src32++; | |
405 | len--; | |
406 | } | |
407 | } | |
408 | } | |
409 | ||
410 | ||
/* Write a chipcommon register by struct-field name (requires the
 * chipcommon core to be selected into the BAR0 window first).
 */
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
							      CHIPCREGOFFS(reg), value)
413 | ||
414 | ||
415 | static void | |
416 | brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) | |
417 | { | |
418 | const struct pci_dev *pdev = devinfo->pdev; | |
419 | struct brcmf_core *core; | |
420 | u32 bar0_win; | |
421 | ||
422 | core = brcmf_chip_get_core(devinfo->ci, coreid); | |
423 | if (core) { | |
424 | bar0_win = core->base; | |
425 | pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win); | |
426 | if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, | |
427 | &bar0_win) == 0) { | |
428 | if (bar0_win != core->base) { | |
429 | bar0_win = core->base; | |
430 | pci_write_config_dword(pdev, | |
431 | BRCMF_PCIE_BAR0_WINDOW, | |
432 | bar0_win); | |
433 | } | |
434 | } | |
435 | } else { | |
436 | brcmf_err("Unsupported core selected %x\n", coreid); | |
437 | } | |
438 | } | |
439 | ||
440 | ||
/* Reset the device via the chipcommon watchdog, preserving the PCIe
 * config registers that the reset would otherwise clobber.  ASPM is
 * disabled around the reset and restored afterwards.
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	/* Config registers to restore after the watchdog reset. */
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* Save link status/control and clear the ASPM enable bits. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);

	/* Fire the chipcommon watchdog and give the reset time to land. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* Restore the saved link status/control value. */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);

	/* Read-then-rewrite each saved config register through the
	 * indirect config access window.
	 */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
				       cfg_offset[i]);
		val = brcmf_pcie_read_reg32(devinfo,
					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
			  cfg_offset[i], val);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
				       val);
	}
}
489 | ||
490 | ||
491 | static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) | |
492 | { | |
493 | u32 config; | |
494 | ||
495 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
496 | if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) | |
bd4f82e3 | 497 | brcmf_pcie_reset_device(devinfo); |
9e37f045 HM |
498 | /* BAR1 window may not be sized properly */ |
499 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
500 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); | |
501 | config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA); | |
502 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config); | |
503 | ||
504 | device_wakeup_enable(&devinfo->pdev->dev); | |
505 | } | |
506 | ||
507 | ||
508 | static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo) | |
509 | { | |
9e37f045 HM |
510 | if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { |
511 | brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4); | |
512 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, | |
513 | 5); | |
514 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, | |
515 | 0); | |
516 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, | |
517 | 7); | |
518 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA, | |
519 | 0); | |
520 | } | |
521 | return 0; | |
522 | } | |
523 | ||
524 | ||
/* Finish firmware download and start the chip running.  Returns 0 on
 * success, non-zero on failure (brcmf_chip_set_active() returns a
 * boolean success flag, which is inverted here).
 */
static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
					  u32 resetintr)
{
	struct brcmf_core *core;

	/* 43602 only: reset the internal memory core before starting. */
	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
		brcmf_chip_resetcore(core, 0, 0, 0);
	}

	return !brcmf_chip_set_active(devinfo->ci, resetintr);
}
537 | ||
538 | ||
4eb3af7c | 539 | static int |
9e37f045 HM |
540 | brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) |
541 | { | |
542 | struct brcmf_pcie_shared_info *shared; | |
543 | u32 addr; | |
544 | u32 cur_htod_mb_data; | |
545 | u32 i; | |
546 | ||
547 | shared = &devinfo->shared; | |
548 | addr = shared->htod_mb_data_addr; | |
549 | cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); | |
550 | ||
551 | if (cur_htod_mb_data != 0) | |
552 | brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n", | |
553 | cur_htod_mb_data); | |
554 | ||
555 | i = 0; | |
556 | while (cur_htod_mb_data != 0) { | |
557 | msleep(10); | |
558 | i++; | |
559 | if (i > 100) | |
4eb3af7c | 560 | return -EIO; |
9e37f045 HM |
561 | cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); |
562 | } | |
563 | ||
564 | brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data); | |
565 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); | |
566 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); | |
4eb3af7c HM |
567 | |
568 | return 0; | |
9e37f045 HM |
569 | } |
570 | ||
571 | ||
/* Consume and act on a device-to-host mailbox value: ACK deep-sleep
 * entry requests, log deep-sleep exit, and complete any waiter on a
 * D3 acknowledgement.
 */
static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (!dtoh_mb_data)
		return;

	/* Clear the word so the firmware can post the next value. */
	brcmf_pcie_write_tcm32(devinfo, addr, 0);

	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
		/* Wake anyone sleeping in the D3 transition path. */
		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
			devinfo->mbdata_completed = true;
			wake_up(&devinfo->mbdata_resp_wait);
		}
	}
}
603 | ||
604 | ||
/* Locate the firmware console in device TCM: the shared structure
 * holds the console address, which in turn holds the log buffer
 * address and size.
 */
static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	struct brcmf_pcie_console *console;
	u32 addr;

	shared = &devinfo->shared;
	console = &shared->console;
	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
		  console->base_addr, console->buf_addr, console->bufsize);
}
624 | ||
625 | ||
/* Drain new characters from the firmware's circular console buffer,
 * accumulating them into console->log_str and emitting one debug line
 * per '\n' (lines longer than the buffer are force-terminated).
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	console = &devinfo->shared.console;
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		/* Circular buffer: wrap the read index at bufsize. */
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* Force a newline when log_str is nearly full, leaving
		 * room for the terminating NUL.
		 */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}
659 | ||
660 | ||
661 | static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo) | |
662 | { | |
663 | u32 reg_value; | |
664 | ||
665 | brcmf_dbg(PCIE, "RING !\n"); | |
666 | reg_value = brcmf_pcie_read_reg32(devinfo, | |
667 | BRCMF_PCIE_PCIE2REG_MAILBOXINT); | |
668 | reg_value |= BRCMF_PCIE2_INTB; | |
669 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, | |
670 | reg_value); | |
671 | } | |
672 | ||
673 | ||
674 | static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo) | |
675 | { | |
676 | brcmf_dbg(PCIE, "RING !\n"); | |
677 | /* Any arbitrary value will do, lets use 1 */ | |
678 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); | |
679 | } | |
680 | ||
681 | ||
682 | static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) | |
683 | { | |
684 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) | |
685 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, | |
686 | 0); | |
687 | else | |
688 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, | |
689 | 0); | |
690 | } | |
691 | ||
692 | ||
693 | static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) | |
694 | { | |
695 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) | |
696 | pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, | |
697 | BRCMF_PCIE_INT_DEF); | |
698 | else | |
699 | brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, | |
700 | BRCMF_PCIE_MB_INT_D2H_DB | | |
701 | BRCMF_PCIE_MB_INT_FN0_0 | | |
702 | BRCMF_PCIE_MB_INT_FN0_1); | |
703 | } | |
704 | ||
705 | ||
706 | static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg) | |
707 | { | |
708 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
709 | u32 status; | |
710 | ||
711 | status = 0; | |
712 | pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status); | |
713 | if (status) { | |
714 | brcmf_pcie_intr_disable(devinfo); | |
715 | brcmf_dbg(PCIE, "Enter\n"); | |
716 | return IRQ_WAKE_THREAD; | |
717 | } | |
718 | return IRQ_NONE; | |
719 | } | |
720 | ||
721 | ||
722 | static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg) | |
723 | { | |
724 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; | |
725 | ||
726 | if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) { | |
727 | brcmf_pcie_intr_disable(devinfo); | |
728 | brcmf_dbg(PCIE, "Enter\n"); | |
729 | return IRQ_WAKE_THREAD; | |
730 | } | |
731 | return IRQ_NONE; | |
732 | } | |
733 | ||
734 | ||
/* Gen-1 threaded IRQ handler: acknowledge the status bits, kick msgbuf
 * RX processing while the bus is up, then re-enable interrupts.
 */
static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	const struct pci_dev *pdev = devinfo->pdev;
	u32 status;

	/* in_irq lets brcmf_pcie_release_irq() wait for us to finish. */
	devinfo->in_irq = true;
	status = 0;
	pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* Write-back acknowledges the pending bits. */
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
	}
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
755 | ||
756 | ||
/* Gen-2 threaded IRQ handler: acknowledge mailbox interrupt bits,
 * dispatch function-0 mailbox data and D2H doorbells, drain the
 * firmware console, then re-enable interrupts while the bus is up.
 */
static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	/* in_irq lets brcmf_pcie_release_irq() wait for us to finish. */
	devinfo->in_irq = true;
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* Write-back acknowledges the pending bits. */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
783 | ||
784 | ||
785 | static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) | |
786 | { | |
787 | struct pci_dev *pdev; | |
788 | ||
789 | pdev = devinfo->pdev; | |
790 | ||
791 | brcmf_pcie_intr_disable(devinfo); | |
792 | ||
793 | brcmf_dbg(PCIE, "Enter\n"); | |
794 | /* is it a v1 or v2 implementation */ | |
795 | devinfo->irq_requested = false; | |
e9efa340 | 796 | pci_enable_msi(pdev); |
9e37f045 HM |
797 | if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { |
798 | if (request_threaded_irq(pdev->irq, | |
799 | brcmf_pcie_quick_check_isr_v1, | |
800 | brcmf_pcie_isr_thread_v1, | |
801 | IRQF_SHARED, "brcmf_pcie_intr", | |
802 | devinfo)) { | |
e9efa340 | 803 | pci_disable_msi(pdev); |
9e37f045 HM |
804 | brcmf_err("Failed to request IRQ %d\n", pdev->irq); |
805 | return -EIO; | |
806 | } | |
807 | } else { | |
808 | if (request_threaded_irq(pdev->irq, | |
809 | brcmf_pcie_quick_check_isr_v2, | |
810 | brcmf_pcie_isr_thread_v2, | |
811 | IRQF_SHARED, "brcmf_pcie_intr", | |
812 | devinfo)) { | |
e9efa340 | 813 | pci_disable_msi(pdev); |
9e37f045 HM |
814 | brcmf_err("Failed to request IRQ %d\n", pdev->irq); |
815 | return -EIO; | |
816 | } | |
817 | } | |
818 | devinfo->irq_requested = true; | |
819 | devinfo->irq_allocated = true; | |
820 | return 0; | |
821 | } | |
822 | ||
823 | ||
/* Tear down the interrupt: mask, free the IRQ, disable MSI, wait for a
 * possibly still-running threaded handler to finish (polled via the
 * in_irq flag), then clear any remaining pending status.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	if (!devinfo->irq_requested)
		return;
	devinfo->irq_requested = false;
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	/* Wait up to ~1 second for the threaded handler to drain. */
	msleep(50);
	count = 0;
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* Acknowledge whatever status is still pending. */
	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
		status = 0;
		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
	} else {
		status = brcmf_pcie_read_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
	}
	devinfo->irq_allocated = false;
}
863 | ||
864 | ||
865 | static int brcmf_pcie_ring_mb_write_rptr(void *ctx) | |
866 | { | |
867 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
868 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
869 | struct brcmf_commonring *commonring = &ring->commonring; | |
870 | ||
871 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
872 | return -EIO; | |
873 | ||
874 | brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, | |
875 | commonring->w_ptr, ring->id); | |
876 | ||
877 | brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr); | |
878 | ||
879 | return 0; | |
880 | } | |
881 | ||
882 | ||
883 | static int brcmf_pcie_ring_mb_write_wptr(void *ctx) | |
884 | { | |
885 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
886 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
887 | struct brcmf_commonring *commonring = &ring->commonring; | |
888 | ||
889 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
890 | return -EIO; | |
891 | ||
892 | brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, | |
893 | commonring->r_ptr, ring->id); | |
894 | ||
895 | brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr); | |
896 | ||
897 | return 0; | |
898 | } | |
899 | ||
900 | ||
901 | static int brcmf_pcie_ring_mb_ring_bell(void *ctx) | |
902 | { | |
903 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
904 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
905 | ||
906 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
907 | return -EIO; | |
908 | ||
909 | devinfo->ringbell(devinfo); | |
910 | ||
911 | return 0; | |
912 | } | |
913 | ||
914 | ||
915 | static int brcmf_pcie_ring_mb_update_rptr(void *ctx) | |
916 | { | |
917 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
918 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
919 | struct brcmf_commonring *commonring = &ring->commonring; | |
920 | ||
921 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
922 | return -EIO; | |
923 | ||
924 | commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr); | |
925 | ||
926 | brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, | |
927 | commonring->w_ptr, ring->id); | |
928 | ||
929 | return 0; | |
930 | } | |
931 | ||
932 | ||
933 | static int brcmf_pcie_ring_mb_update_wptr(void *ctx) | |
934 | { | |
935 | struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; | |
936 | struct brcmf_pciedev_info *devinfo = ring->devinfo; | |
937 | struct brcmf_commonring *commonring = &ring->commonring; | |
938 | ||
939 | if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) | |
940 | return -EIO; | |
941 | ||
942 | commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr); | |
943 | ||
944 | brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, | |
945 | commonring->r_ptr, ring->id); | |
946 | ||
947 | return 0; | |
948 | } | |
949 | ||
950 | ||
951 | static void * | |
952 | brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo, | |
953 | u32 size, u32 tcm_dma_phys_addr, | |
954 | dma_addr_t *dma_handle) | |
955 | { | |
956 | void *ring; | |
83297aaa | 957 | u64 address; |
9e37f045 HM |
958 | |
959 | ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, | |
960 | GFP_KERNEL); | |
961 | if (!ring) | |
962 | return NULL; | |
963 | ||
83297aaa | 964 | address = (u64)*dma_handle; |
9e37f045 HM |
965 | brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, |
966 | address & 0xffffffff); | |
967 | brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); | |
968 | ||
969 | memset(ring, 0, size); | |
970 | ||
971 | return (ring); | |
972 | } | |
973 | ||
974 | ||
975 | static struct brcmf_pcie_ringbuf * | |
976 | brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, | |
977 | u32 tcm_ring_phys_addr) | |
978 | { | |
979 | void *dma_buf; | |
980 | dma_addr_t dma_handle; | |
981 | struct brcmf_pcie_ringbuf *ring; | |
982 | u32 size; | |
983 | u32 addr; | |
984 | ||
985 | size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id]; | |
986 | dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size, | |
987 | tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET, | |
988 | &dma_handle); | |
989 | if (!dma_buf) | |
990 | return NULL; | |
991 | ||
992 | addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET; | |
993 | brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]); | |
994 | addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET; | |
995 | brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]); | |
996 | ||
997 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | |
998 | if (!ring) { | |
999 | dma_free_coherent(&devinfo->pdev->dev, size, dma_buf, | |
1000 | dma_handle); | |
1001 | return NULL; | |
1002 | } | |
1003 | brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id], | |
1004 | brcmf_ring_itemsize[ring_id], dma_buf); | |
1005 | ring->dma_handle = dma_handle; | |
1006 | ring->devinfo = devinfo; | |
1007 | brcmf_commonring_register_cb(&ring->commonring, | |
1008 | brcmf_pcie_ring_mb_ring_bell, | |
1009 | brcmf_pcie_ring_mb_update_rptr, | |
1010 | brcmf_pcie_ring_mb_update_wptr, | |
1011 | brcmf_pcie_ring_mb_write_rptr, | |
1012 | brcmf_pcie_ring_mb_write_wptr, ring); | |
1013 | ||
1014 | return (ring); | |
1015 | } | |
1016 | ||
1017 | ||
1018 | static void brcmf_pcie_release_ringbuffer(struct device *dev, | |
1019 | struct brcmf_pcie_ringbuf *ring) | |
1020 | { | |
1021 | void *dma_buf; | |
1022 | u32 size; | |
1023 | ||
1024 | if (!ring) | |
1025 | return; | |
1026 | ||
1027 | dma_buf = ring->commonring.buf_addr; | |
1028 | if (dma_buf) { | |
1029 | size = ring->commonring.depth * ring->commonring.item_len; | |
1030 | dma_free_coherent(dev, size, dma_buf, ring->dma_handle); | |
1031 | } | |
1032 | kfree(ring); | |
1033 | } | |
1034 | ||
1035 | ||
1036 | static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) | |
1037 | { | |
1038 | u32 i; | |
1039 | ||
1040 | for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) { | |
1041 | brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev, | |
1042 | devinfo->shared.commonrings[i]); | |
1043 | devinfo->shared.commonrings[i] = NULL; | |
1044 | } | |
1045 | kfree(devinfo->shared.flowrings); | |
1046 | devinfo->shared.flowrings = NULL; | |
1047 | } | |
1048 | ||
1049 | ||
/* Parse the ring-info block the firmware published in shared RAM and set
 * up all message rings: DMA-backed common rings (H2D control/submit and
 * D2H completion) plus host-side bookkeeping for the H2D flowrings.
 * The per-ring read/write index slots live in dongle TCM as consecutive
 * u32 entries, one array per direction. Returns 0 or -ENOMEM; any
 * partial allocation is rolled back on failure.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u16 max_sub_queues;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);

	/* TCM locations of the four index-pointer arrays */
	addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
	d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
	d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
	h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
	h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* start of the per-ring descriptor memory in TCM */
	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* host-to-device common rings come first in the descriptor area */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* then the device-to-host completion rings */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += sizeof(u32);
		d2h_r_idx_ptr += sizeof(u32);
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* firmware reports total H2D queues; the remainder after the
	 * common H2D rings are flowrings (DMA buffers for these are
	 * presumably created later, on demand - not visible here)
	 */
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		/* flowring index slots follow the common H2D ones */
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += sizeof(u32);
		h2d_r_idx_ptr += sizeof(u32);
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating commonring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
1145 | ||
1146 | ||
1147 | static void | |
1148 | brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo) | |
1149 | { | |
1150 | if (devinfo->shared.scratch) | |
1151 | dma_free_coherent(&devinfo->pdev->dev, | |
1152 | BRCMF_DMA_D2H_SCRATCH_BUF_LEN, | |
1153 | devinfo->shared.scratch, | |
1154 | devinfo->shared.scratch_dmahandle); | |
1155 | if (devinfo->shared.ringupd) | |
1156 | dma_free_coherent(&devinfo->pdev->dev, | |
1157 | BRCMF_DMA_D2H_RINGUPD_BUF_LEN, | |
1158 | devinfo->shared.ringupd, | |
1159 | devinfo->shared.ringupd_dmahandle); | |
1160 | } | |
1161 | ||
/* Allocate the two device-to-host DMA buffers the firmware needs -- a
 * scratch area and a ring-update area -- and publish each one's 64-bit
 * bus address plus length to fixed offsets in shared TCM.
 * Returns 0 or -ENOMEM (partial allocations are released on failure).
 */
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
	u64 address;
	u32 addr;

	devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
		&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.scratch)
		goto fail;

	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
	brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	/* bus address (low word, then high word) and length into TCM */
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
	address = (u64)devinfo->shared.scratch_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
		&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.ringupd)
		goto fail;

	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
	brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);

	/* same publication scheme for the ring-update buffer */
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
	address = (u64)devinfo->shared.ringupd_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
	return 0;

fail:
	brcmf_err("Allocating scratch buffers failed\n");
	brcmf_pcie_release_scratchbuffers(devinfo);
	return -ENOMEM;
}
1209 | ||
1210 | ||
/* Bus-ops .stop hook: intentionally empty for PCIe.
 * NOTE(review): presumably teardown happens via brcmf_pcie_remove() and
 * the msgbuf protocol layer instead -- confirm against bus.h contract.
 */
static void brcmf_pcie_down(struct device *dev)
{
}
1214 | ||
1215 | ||
/* Bus-ops .txdata hook: no-op returning success.
 * NOTE(review): on PCIe data frames presumably flow through the msgbuf
 * protocol's flowrings, not this bus op -- confirm against msgbuf.c.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
1220 | ||
1221 | ||
/* Bus-ops .txctl hook: no-op returning success.
 * NOTE(review): control messages presumably go via the msgbuf control
 * submit ring rather than this bus op -- confirm against msgbuf.c.
 */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
1227 | ||
1228 | ||
/* Bus-ops .rxctl hook: no-op returning success.
 * NOTE(review): control completions presumably arrive via the msgbuf
 * control complete ring rather than this bus op -- confirm.
 */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
1234 | ||
1235 | ||
4eb3af7c HM |
1236 | static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) |
1237 | { | |
1238 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | |
1239 | struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; | |
1240 | struct brcmf_pciedev_info *devinfo = buspub->devinfo; | |
1241 | ||
1242 | brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); | |
1243 | devinfo->wowl_enabled = enabled; | |
1244 | if (enabled) | |
1245 | device_set_wakeup_enable(&devinfo->pdev->dev, true); | |
1246 | else | |
1247 | device_set_wakeup_enable(&devinfo->pdev->dev, false); | |
1248 | } | |
1249 | ||
1250 | ||
9e37f045 HM |
/* Bus-layer callback table handed to the common driver via bus->ops.
 * tx/stop/ctl entries are stubs on PCIe (traffic goes through msgbuf);
 * wowl_config is the only hook with real work.
 */
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
};
1258 | ||
1259 | ||
/* Parse the shared-RAM info block the firmware placed at
 * @sharedram_addr: validate the protocol version and feature flags,
 * then cache the TCM addresses of the mailbox areas and ring-info
 * block plus RX tuning values. Returns 0 or -EINVAL on an
 * unsupported firmware.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	/* first word: flags, low bits carry the protocol version */
	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}
	/* this driver only speaks the ring-based (non-TXPUSH) scheme */
	if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
		brcmf_err("Unsupported legacy TX mode 0x%x\n",
			  shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
		return -EINVAL;
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	/* host-to-device and device-to-host mailbox data areas */
	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
1309 | ||
1310 | ||
1311 | static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo) | |
1312 | { | |
1313 | char *fw_name; | |
1314 | char *nvram_name; | |
1315 | uint fw_len, nv_len; | |
1316 | char end; | |
1317 | ||
1318 | brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip, | |
1319 | devinfo->ci->chiprev); | |
1320 | ||
1321 | switch (devinfo->ci->chip) { | |
1322 | case BRCM_CC_43602_CHIP_ID: | |
1323 | fw_name = BRCMF_PCIE_43602_FW_NAME; | |
1324 | nvram_name = BRCMF_PCIE_43602_NVRAM_NAME; | |
1325 | break; | |
9e37f045 HM |
1326 | case BRCM_CC_4356_CHIP_ID: |
1327 | fw_name = BRCMF_PCIE_4356_FW_NAME; | |
1328 | nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; | |
1329 | break; | |
1330 | case BRCM_CC_43567_CHIP_ID: | |
1331 | case BRCM_CC_43569_CHIP_ID: | |
1332 | case BRCM_CC_43570_CHIP_ID: | |
1333 | fw_name = BRCMF_PCIE_43570_FW_NAME; | |
1334 | nvram_name = BRCMF_PCIE_43570_NVRAM_NAME; | |
1335 | break; | |
1336 | default: | |
1337 | brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip); | |
1338 | return -ENODEV; | |
1339 | } | |
1340 | ||
1341 | fw_len = sizeof(devinfo->fw_name) - 1; | |
1342 | nv_len = sizeof(devinfo->nvram_name) - 1; | |
1343 | /* check if firmware path is provided by module parameter */ | |
1344 | if (brcmf_firmware_path[0] != '\0') { | |
1345 | strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len); | |
1346 | strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len); | |
1347 | fw_len -= strlen(devinfo->fw_name); | |
1348 | nv_len -= strlen(devinfo->nvram_name); | |
1349 | ||
1350 | end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; | |
1351 | if (end != '/') { | |
1352 | strncat(devinfo->fw_name, "/", fw_len); | |
1353 | strncat(devinfo->nvram_name, "/", nv_len); | |
1354 | fw_len--; | |
1355 | nv_len--; | |
1356 | } | |
1357 | } | |
1358 | strncat(devinfo->fw_name, fw_name, fw_len); | |
1359 | strncat(devinfo->nvram_name, nvram_name, nv_len); | |
1360 | ||
1361 | return 0; | |
1362 | } | |
1363 | ||
1364 | ||
/* Download firmware (and optional NVRAM) into dongle RAM and wait for
 * the firmware to come alive. The last RAM word doubles as a handshake:
 * the host zeroes it, and the running firmware overwrites it with the
 * address of its shared-RAM info block. Consumes @fw (released here)
 * and @nvram (freed here when non-NULL). Returns 0 or a negative errno.
 */
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	/* only GENREV2 paths are configured here; GENREV1 setup is
	 * presumably handled elsewhere -- not visible in this chunk
	 */
	devinfo->ringbell = brcmf_pcie_ringbell_v2;
	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	/* first word of the image is the ARM reset vector */
	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* reset last 4 bytes of RAM address. to be used for shared
	 * area. This identifies when FW is running
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		/* NVRAM sits at the very top of RAM */
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	/* poll (50 ms steps) until the firmware rewrites the handshake
	 * word or the timeout budget runs out
	 */
	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
1433 | ||
1434 | ||
/* Enable the PCI device, set bus mastering and map both BARs:
 * BAR0 holds chip registers, BAR2 the dongle TCM window. On failure
 * the caller is expected to invoke brcmf_pcie_release_resource() to
 * undo partial mappings. Returns 0 or a negative errno.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address (PCI resource index 2) */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}
	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
		  devinfo->tcm, (unsigned long long)bar1_addr);

	return 0;
}
1480 | ||
1481 | ||
1482 | static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) | |
1483 | { | |
1484 | if (devinfo->tcm) | |
1485 | iounmap(devinfo->tcm); | |
1486 | if (devinfo->regs) | |
1487 | iounmap(devinfo->regs); | |
1488 | ||
1489 | pci_disable_device(devinfo->pdev); | |
1490 | } | |
1491 | ||
1492 | ||
/* Attach this bus to the common driver layer and start it.
 * Returns 0 on success or the error from brcmf_attach()/
 * brcmf_bus_start().
 */
static int brcmf_pcie_attach_bus(struct device *dev)
{
	int ret;

	/* Attach to the common driver interface */
	ret = brcmf_attach(dev);
	if (ret) {
		brcmf_err("brcmf_attach failed\n");
		return ret;
	}

	ret = brcmf_bus_start(dev);
	if (ret)
		brcmf_err("dongle is not responding\n");

	return ret;
}
1509 | ||
1510 | ||
1511 | static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) | |
1512 | { | |
1513 | u32 ret_addr; | |
1514 | ||
1515 | ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1); | |
1516 | addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1); | |
1517 | pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr); | |
1518 | ||
1519 | return ret_addr; | |
1520 | } | |
1521 | ||
1522 | ||
1523 | static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr) | |
1524 | { | |
1525 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1526 | ||
1527 | addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); | |
1528 | return brcmf_pcie_read_reg32(devinfo, addr); | |
1529 | } | |
1530 | ||
1531 | ||
1532 | static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value) | |
1533 | { | |
1534 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1535 | ||
1536 | addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr); | |
1537 | brcmf_pcie_write_reg32(devinfo, addr, value); | |
1538 | } | |
1539 | ||
1540 | ||
1541 | static int brcmf_pcie_buscoreprep(void *ctx) | |
1542 | { | |
1543 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1544 | int err; | |
1545 | ||
1546 | err = brcmf_pcie_get_resource(devinfo); | |
1547 | if (err == 0) { | |
1548 | /* Set CC watchdog to reset all the cores on the chip to bring | |
1549 | * back dongle to a sane state. | |
1550 | */ | |
1551 | brcmf_pcie_buscore_write32(ctx, CORE_CC_REG(SI_ENUM_BASE, | |
1552 | watchdog), 4); | |
1553 | msleep(100); | |
1554 | } | |
1555 | ||
1556 | return err; | |
1557 | } | |
1558 | ||
1559 | ||
d380ebc9 AS |
1560 | static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip, |
1561 | u32 rstvec) | |
9e37f045 HM |
1562 | { |
1563 | struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; | |
1564 | ||
1565 | brcmf_pcie_write_tcm32(devinfo, 0, rstvec); | |
1566 | } | |
1567 | ||
1568 | ||
/* Callback table handed to brcmf_chip_attach(): resource setup, ARM
 * activation and windowed backplane register access for this bus.
 */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
1575 | ||
1576 | static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, | |
1577 | void *nvram, u32 nvram_len) | |
1578 | { | |
1579 | struct brcmf_bus *bus = dev_get_drvdata(dev); | |
1580 | struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; | |
1581 | struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; | |
1582 | struct brcmf_commonring **flowrings; | |
1583 | int ret; | |
1584 | u32 i; | |
1585 | ||
1586 | brcmf_pcie_attach(devinfo); | |
1587 | ||
1588 | ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); | |
1589 | if (ret) | |
1590 | goto fail; | |
1591 | ||
1592 | devinfo->state = BRCMFMAC_PCIE_STATE_UP; | |
1593 | ||
1594 | ret = brcmf_pcie_init_ringbuffers(devinfo); | |
1595 | if (ret) | |
1596 | goto fail; | |
1597 | ||
1598 | ret = brcmf_pcie_init_scratchbuffers(devinfo); | |
1599 | if (ret) | |
1600 | goto fail; | |
1601 | ||
1602 | brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); | |
1603 | ret = brcmf_pcie_request_irq(devinfo); | |
1604 | if (ret) | |
1605 | goto fail; | |
1606 | ||
1607 | /* hook the commonrings in the bus structure. */ | |
1608 | for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) | |
1609 | bus->msgbuf->commonrings[i] = | |
1610 | &devinfo->shared.commonrings[i]->commonring; | |
1611 | ||
1612 | flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings), | |
1613 | GFP_KERNEL); | |
1614 | if (!flowrings) | |
1615 | goto fail; | |
1616 | ||
1617 | for (i = 0; i < devinfo->shared.nrof_flowrings; i++) | |
1618 | flowrings[i] = &devinfo->shared.flowrings[i].commonring; | |
1619 | bus->msgbuf->flowrings = flowrings; | |
1620 | ||
1621 | bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; | |
1622 | bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; | |
1623 | bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings; | |
1624 | ||
1625 | init_waitqueue_head(&devinfo->mbdata_resp_wait); | |
1626 | ||
1627 | brcmf_pcie_intr_enable(devinfo); | |
1628 | if (brcmf_pcie_attach_bus(bus->dev) == 0) | |
1629 | return; | |
1630 | ||
1631 | brcmf_pcie_bus_console_read(devinfo); | |
1632 | ||
1633 | fail: | |
1634 | device_release_driver(dev); | |
1635 | } | |
1636 | ||
/* PCI probe: allocate the device/bus bookkeeping, attach the chip via
 * the buscore ops (this also maps the BARs), resolve firmware names
 * and kick off the asynchronous firmware request whose completion
 * (brcmf_pcie_setup) finishes bring-up. Returns 0 or a negative errno;
 * on failure everything allocated so far is torn down here.
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		/* NULL it so the fail path skips brcmf_chip_detach() */
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	/* WOWL needs PME from D3hot */
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_pcie_get_fwnames(devinfo);
	if (ret)
		goto fail_bus;

	/* async: brcmf_pcie_setup() runs when the firmware arrives */
	ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
					       BRCMF_FW_REQ_NV_OPTIONAL,
				     devinfo->fw_name, devinfo->nvram_name,
				     brcmf_pcie_setup);
	if (ret == 0)
		return 0;
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
1712 | ||
1713 | ||
/* PCI remove: detach the common layer first (it may still use the
 * rings), then free IRQ, DMA buffers and rings, reset the device and
 * release PCI resources. Also reached from the suspend path when WOWL
 * is disabled, hence the drvdata NULL check.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	/* ci may already be NULL when coming from the suspend path */
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
1751 | ||
1752 | ||
1753 | #ifdef CONFIG_PM | |
1754 | ||
1755 | ||
/* Legacy PM suspend: ask the firmware to enter the D3 substate via the
 * mailbox and wait for its acknowledgment. With WOWL armed the device
 * is then put to sleep with wake enabled; otherwise (or if saving PCI
 * state fails) the driver tears itself down completely via
 * brcmf_pcie_remove(). Returns 0, -EIO on handshake timeout, or the
 * pci_prepare_to_sleep() result.
 */
static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);

	bus = dev_get_drvdata(&pdev->dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	/* D3 handshake: send the inform, wait for the mailbox response */
	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait,
			   devinfo->mbdata_completed,
			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		return -EIO;
	}
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);

	err = pci_save_state(pdev);
	if (err)
		brcmf_err("pci_save_state failed, err=%d\n", err);
	if ((err) || (!devinfo->wowl_enabled)) {
		/* no WOWL: full teardown instead of sleeping the device.
		 * ci is detached here and NULLed so remove() skips the
		 * chip operations.
		 */
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
		return 0;
	}

	return pci_prepare_to_sleep(pdev);
}
1793 | ||
9e37f045 HM |
/* Legacy PCI PM resume callback.
 * Restores PCI power/config state, then tries a "hot" resume: if the
 * device kept its interrupt mask programmed across sleep it is still
 * running firmware, so only a D0_INFORM mailbox message is needed.
 * Otherwise (or on any failure) the remaining driver state is torn
 * down and the device is reprobed from scratch.
 *
 * Return: 0 on success, otherwise a negative errno from the reprobe.
 */
static int brcmf_pcie_resume(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	/* bus may be NULL: suspend removes the device when wowl is off. */
	bus = dev_get_drvdata(&pdev->dev);
	brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		brcmf_err("pci_set_power_state failed, err=%d\n", err);
		goto cleanup;
	}
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, false);
	pci_enable_wake(pdev, PCI_D3cold, false);

	/* Check if device is still up and running, if so we are ready */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		/* A non-zero INTMASK means the firmware survived sleep. */
		if (brcmf_pcie_read_reg32(devinfo,
					  BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
			if (brcmf_pcie_send_mb_data(devinfo,
						    BRCMF_H2D_HOST_D0_INFORM))
				goto cleanup;
			brcmf_dbg(PCIE, "Hot resume, continue....\n");
			brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
			brcmf_bus_change_state(bus, BRCMF_BUS_UP);
			brcmf_pcie_intr_enable(devinfo);
			return 0;
		}
	}

cleanup:
	if (bus) {
		/* Detach the chip here so brcmf_pcie_remove() skips its
		 * own chip_detach (it checks devinfo->ci).
		 */
		devinfo = bus->bus_priv.pcie->devinfo;
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
	}
	/* Cold resume: rebuild everything via a fresh probe. */
	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		brcmf_err("probe after resume failed, err=%d\n", err);

	return err;
}
1841 | ||
1842 | ||
1843 | #endif /* CONFIG_PM */ | |
1844 | ||
1845 | ||
1846 | #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ | |
1847 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } | |
1848 | ||
1849 | static struct pci_device_id brcmf_pcie_devid_table[] = { | |
9e37f045 HM |
1850 | BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), |
1851 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), | |
1852 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), | |
1853 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID), | |
48fd818f HM |
1854 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID), |
1855 | BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID), | |
9e37f045 HM |
1856 | { /* end: all zeroes */ } |
1857 | }; | |
1858 | ||
1859 | ||
1860 | MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table); | |
1861 | ||
1862 | ||
1863 | static struct pci_driver brcmf_pciedrvr = { | |
1864 | .node = {}, | |
1865 | .name = KBUILD_MODNAME, | |
1866 | .id_table = brcmf_pcie_devid_table, | |
1867 | .probe = brcmf_pcie_probe, | |
1868 | .remove = brcmf_pcie_remove, | |
1869 | #ifdef CONFIG_PM | |
1870 | .suspend = brcmf_pcie_suspend, | |
1871 | .resume = brcmf_pcie_resume | |
1872 | #endif /* CONFIG_PM */ | |
1873 | }; | |
1874 | ||
1875 | ||
1876 | void brcmf_pcie_register(void) | |
1877 | { | |
1878 | int err; | |
1879 | ||
1880 | brcmf_dbg(PCIE, "Enter\n"); | |
1881 | err = pci_register_driver(&brcmf_pciedrvr); | |
1882 | if (err) | |
1883 | brcmf_err("PCIE driver registration failed, err=%d\n", err); | |
1884 | } | |
1885 | ||
1886 | ||
/* Unregister the brcmfmac PCIe driver; the PCI core invokes
 * brcmf_pcie_remove() for every bound device as part of this call.
 */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}