/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

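/* When non-zero, allow the target (SoC) to enter its power-save state when
 * idle; by default the driver keeps the target awake. */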
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

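/* Poll the RTC state for up to ~1 second (100 x 10 ms) waiting for the
 * target to report that it is awake. */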
static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wakeup target\n");
}

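/* Wake the target from power-save. Wakeups are reference counted via
 * keep_awake_count: the first waker forces the SoC AWAKE and then busy-waits,
 * with a back-off capped at PCIE_WAKE_TIMEOUT, until the RTC reports ON. */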
void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

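/* Drop one wakeup reference; when the count reaches zero the SoC is allowed
 * to enter power-save again. */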
void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}

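/* Number of send slots (transfer resources) currently available on a pipe;
 * used by the layers above for flow control. */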
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}

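/* Firmware crash handler: fetch the register dump location from the
 * hi_failure_state host-interest item via the diagnostic window, print the
 * dump, then schedule a device restart. */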
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

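/* Register per-pipe CE send/recv callbacks and preallocate one completion
 * structure per ring entry. The diagnostic CE is skipped; it is used
 * synchronously. */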
static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

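/* Quiesce CE processing: disable CE interrupts, kill the tasklets and flag
 * queued completions as aborted so upper layers can reclaim their buffers. */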
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

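/* Free every completion structure: anything still on the processing queue
 * (hence the warning about possible leaks) and the per-pipe free lists. */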
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = (struct sk_buff *)compl->transfer_context;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

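/* Drain the shared completion queue, dispatching HTC tx/rx completion
 * callbacks in order. compl_processing serializes callers, since the upper
 * layers cannot handle tx/rx completions in parallel. */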
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

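/* Allocate, DMA-map and post num receive skbs (of the pipe's buffer size)
 * to the CE destination ring; on failure everything posted so far for this
 * pipe is cleaned up. */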
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    pipe_info->pipe_num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    pipe_info->pipe_num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT)
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *   buffers that were enqueued for receive
 *   buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

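/* Disable every interrupt line assigned to the device: the single legacy
 * line, or each vector in the MSI range when MSI is in use. */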
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}

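/* Synchronous BMI exchange over the BMI copy engines: the request is copied
 * into a DMA-safe bounce buffer and sent on the TX CE; if a response is
 * expected, a receive buffer is posted on the RX CE first. The call then
 * waits up to BMI_COMMUNICATION_TIMEOUT_HZ for completion. */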
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{				/* Must be last */
		0,
		0,
		0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					      CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					       CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}

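/* Push the host's view of the CE layout to the target: write
 * target_ce_config_wlan and the service-to-pipe map into target memory via
 * the diagnostic window, disable PCIe L1, configure early IRAM allocation,
 * and finally set HI_OPTION_EARLY_CFG_DONE so the target proceeds with
 * initialization. */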
1649 | static int ath10k_pci_init_config(struct ath10k *ar) | |
1650 | { | |
1651 | u32 interconnect_targ_addr; | |
1652 | u32 pcie_state_targ_addr = 0; | |
1653 | u32 pipe_cfg_targ_addr = 0; | |
1654 | u32 svc_to_pipe_map = 0; | |
1655 | u32 pcie_config_flags = 0; | |
1656 | u32 ealloc_value; | |
1657 | u32 ealloc_targ_addr; | |
1658 | u32 flag2_value; | |
1659 | u32 flag2_targ_addr; | |
1660 | int ret = 0; | |
1661 | ||
1662 | /* Download to Target the CE Config and the service-to-CE map */ | |
1663 | interconnect_targ_addr = | |
1664 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); | |
1665 | ||
1666 | /* Supply Target-side CE configuration */ | |
1667 | ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, | |
1668 | &pcie_state_targ_addr); | |
1669 | if (ret != 0) { | |
1670 | ath10k_err("Failed to get pcie state addr: %d\n", ret); | |
1671 | return ret; | |
1672 | } | |
1673 | ||
1674 | if (pcie_state_targ_addr == 0) { | |
1675 | ret = -EIO; | |
1676 | ath10k_err("Invalid pcie state addr\n"); | |
1677 | return ret; | |
1678 | } | |
1679 | ||
1680 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | |
1681 | offsetof(struct pcie_state, | |
1682 | pipe_cfg_addr), | |
1683 | &pipe_cfg_targ_addr); | |
1684 | if (ret != 0) { | |
1685 | ath10k_err("Failed to get pipe cfg addr: %d\n", ret); | |
1686 | return ret; | |
1687 | } | |
1688 | ||
1689 | if (pipe_cfg_targ_addr == 0) { | |
1690 | ret = -EIO; | |
1691 | ath10k_err("Invalid pipe cfg addr\n"); | |
1692 | return ret; | |
1693 | } | |
1694 | ||
1695 | ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, | |
1696 | target_ce_config_wlan, | |
1697 | sizeof(target_ce_config_wlan)); | |
1698 | ||
1699 | if (ret != 0) { | |
1700 | ath10k_err("Failed to write pipe cfg: %d\n", ret); | |
1701 | return ret; | |
1702 | } | |
1703 | ||
1704 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | |
1705 | offsetof(struct pcie_state, | |
1706 | svc_to_pipe_map), | |
1707 | &svc_to_pipe_map); | |
1708 | if (ret != 0) { | |
1709 | ath10k_err("Failed to get svc/pipe map: %d\n", ret); | |
1710 | return ret; | |
1711 | } | |
1712 | ||
1713 | if (svc_to_pipe_map == 0) { | |
1714 | ret = -EIO; | |
1715 | ath10k_err("Invalid svc_to_pipe map\n"); | |
1716 | return ret; | |
1717 | } | |
1718 | ||
1719 | ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, | |
1720 | target_service_to_ce_map_wlan, | |
1721 | sizeof(target_service_to_ce_map_wlan)); | |
1722 | if (ret != 0) { | |
1723 | ath10k_err("Failed to write svc/pipe map: %d\n", ret); | |
1724 | return ret; | |
1725 | } | |
1726 | ||
1727 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | |
1728 | offsetof(struct pcie_state, | |
1729 | config_flags), | |
1730 | &pcie_config_flags); | |
1731 | if (ret != 0) { | |
1732 | ath10k_err("Failed to get pcie config_flags: %d\n", ret); | |
1733 | return ret; | |
1734 | } | |
1735 | ||
1736 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; | |
1737 | ||
1738 | ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + | |
1739 | offsetof(struct pcie_state, config_flags), | |
1740 | &pcie_config_flags, | |
1741 | sizeof(pcie_config_flags)); | |
1742 | if (ret != 0) { | |
1743 | ath10k_err("Failed to write pcie config_flags: %d\n", ret); | |
1744 | return ret; | |
1745 | } | |
1746 | ||
1747 | /* configure early allocation */ | |
1748 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); | |
1749 | ||
1750 | ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); | |
1751 | if (ret != 0) { | |
1752 | ath10k_err("Faile to get early alloc val: %d\n", ret); | |
1753 | return ret; | |
1754 | } | |
1755 | ||
1756 | /* first bank is switched to IRAM */ | |
1757 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & | |
1758 | HI_EARLY_ALLOC_MAGIC_MASK); | |
1759 | ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & | |
1760 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); | |
1761 | ||
1762 | ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); | |
1763 | if (ret != 0) { | |
1764 | ath10k_err("Failed to set early alloc val: %d\n", ret); | |
1765 | return ret; | |
1766 | } | |
1767 | ||
1768 | /* Tell Target to proceed with initialization */ | |
1769 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); | |
1770 | ||
1771 | ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); | |
1772 | if (ret != 0) { | |
1773 | ath10k_err("Failed to get option val: %d\n", ret); | |
1774 | return ret; | |
1775 | } | |
1776 | ||
1777 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; | |
1778 | ||
1779 | ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); | |
1780 | if (ret != 0) { | |
1781 | ath10k_err("Failed to set option val: %d\n", ret); | |
1782 | return ret; | |
1783 | } | |
1784 | ||
1785 | return 0; | |
1786 | } | |
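/*
 * Each "read a target-side pointer, reject zero" step above follows the
 * same shape. As an illustration only (this helper is hypothetical),
 * the pattern could be factored as:
 */
static int ath10k_pci_diag_read_ptr(struct ath10k *ar, u32 address,
				    const char *name, u32 *value)
{
	int ret;

	ret = ath10k_pci_diag_read_access(ar, address, value);
	if (ret) {
		ath10k_err("Failed to get %s: %d\n", name, ret);
		return ret;
	}

	if (*value == 0) {
		ath10k_err("Invalid %s\n", name);
		return -EIO;
	}

	return 0;
}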
1787 | ||
1788 | ||
1789 | ||
1790 | static int ath10k_pci_ce_init(struct ath10k *ar) | |
1791 | { | |
1792 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
87263e5b | 1793 | struct ath10k_pci_pipe *pipe_info; |
5e3dd157 KV |
1794 | const struct ce_attr *attr; |
1795 | int pipe_num; | |
1796 | ||
1797 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | |
1798 | pipe_info = &ar_pci->pipe_info[pipe_num]; | |
1799 | pipe_info->pipe_num = pipe_num; | |
1800 | pipe_info->hif_ce_state = ar; | |
1801 | attr = &host_ce_config_wlan[pipe_num]; | |
1802 | ||
1803 | pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); | |
1804 | if (pipe_info->ce_hdl == NULL) { | |
1805 | ath10k_err("Unable to initialize CE for pipe: %d\n", | |
1806 | pipe_num); | |
1807 | ||
1808 | /* It is safe to call this here; it checks that ce_hdl | |
1809 | * is valid for each pipe */ | |
1810 | ath10k_pci_ce_deinit(ar); | |
1811 | return -ENOMEM; | |
1812 | } | |
1813 | ||
1814 | if (pipe_num == ar_pci->ce_count - 1) { | |
1815 | /* | |
1816 | * Reserve the last CE for | |
1817 | * diagnostic Window support | |
1818 | */ | |
1819 | ar_pci->ce_diag = | |
1820 | ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; | |
1821 | continue; | |
1822 | } | |
1823 | ||
1824 | pipe_info->buf_sz = (size_t) (attr->src_sz_max); | |
1825 | } | |
1826 | ||
1827 | /* | |
1828 | * Initially, establish CE completion handlers for use with BMI. | |
1829 | * These are overwritten with generic handlers after we exit BMI phase. | |
1830 | */ | |
1831 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; | |
1832 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | |
1833 | ath10k_pci_bmi_send_done, 0); | |
1834 | ||
1835 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; | |
1836 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | |
1837 | ath10k_pci_bmi_recv_data); | |
1838 | ||
1839 | return 0; | |
1840 | } | |
1841 | ||
1842 | static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) | |
1843 | { | |
1844 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
1845 | u32 fw_indicator_address, fw_indicator; | |
1846 | ||
1847 | ath10k_pci_wake(ar); | |
1848 | ||
1849 | fw_indicator_address = ar_pci->fw_indicator_address; | |
1850 | fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); | |
1851 | ||
1852 | if (fw_indicator & FW_IND_EVENT_PENDING) { | |
1853 | /* ACK: clear Target-side pending event */ | |
1854 | ath10k_pci_write32(ar, fw_indicator_address, | |
1855 | fw_indicator & ~FW_IND_EVENT_PENDING); | |
1856 | ||
1857 | if (ar_pci->started) { | |
1858 | ath10k_pci_hif_dump_area(ar); | |
1859 | } else { | |
1860 | /* | |
1861 | * Probable Target failure before we're prepared | |
1862 | * to handle it. Generally unexpected. | |
1863 | */ | |
1864 | ath10k_warn("early firmware event indicated\n"); | |
1865 | } | |
1866 | } | |
1867 | ||
1868 | ath10k_pci_sleep(ar); | |
1869 | } | |
1870 | ||
8c5c5368 MK |
1871 | static int ath10k_pci_hif_power_up(struct ath10k *ar) |
1872 | { | |
8cc8df90 | 1873 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
8c5c5368 MK |
1874 | int ret; |
1875 | ||
32270b61 MK |
1876 | ret = ath10k_pci_start_intr(ar); |
1877 | if (ret) { | |
1878 | ath10k_err("could not start interrupt handling (%d)\n", ret); | |
1879 | goto err; | |
1880 | } | |
1881 | ||
8c5c5368 MK |
1882 | /* |
1883 | * Bring the target up cleanly. | |
1884 | * | |
1885 | * The target may be in an undefined state with an AUX-powered Target | |
1886 | * and a Host in WoW mode. If the Host crashes, loses power, or is | |
1887 | * restarted (without unloading the driver) then the Target is left | |
1888 | * (aux) powered and running. On a subsequent driver load, the Target | |
1889 | * is in an unexpected state. We try to catch that here in order to | |
1890 | * reset the Target and retry the probe. | |
1891 | */ | |
1892 | ath10k_pci_device_reset(ar); | |
1893 | ||
1894 | ret = ath10k_pci_reset_target(ar); | |
1895 | if (ret) | |
32270b61 | 1896 | goto err_irq; |
8c5c5368 | 1897 | |
8cc8df90 | 1898 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
8c5c5368 | 1899 | /* Force AWAKE forever */ |
8c5c5368 | 1900 | ath10k_do_pci_wake(ar); |
8c5c5368 MK |
1901 | |
1902 | ret = ath10k_pci_ce_init(ar); | |
1903 | if (ret) | |
1904 | goto err_ps; | |
1905 | ||
1906 | ret = ath10k_pci_init_config(ar); | |
1907 | if (ret) | |
1908 | goto err_ce; | |
1909 | ||
1910 | ret = ath10k_pci_wake_target_cpu(ar); | |
1911 | if (ret) { | |
1912 | ath10k_err("could not wake up target CPU (%d)\n", ret); | |
1913 | goto err_ce; | |
1914 | } | |
1915 | ||
1916 | return 0; | |
1917 | ||
1918 | err_ce: | |
1919 | ath10k_pci_ce_deinit(ar); | |
1920 | err_ps: | |
8cc8df90 | 1921 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
8c5c5368 | 1922 | ath10k_do_pci_sleep(ar); |
32270b61 MK |
1923 | err_irq: |
1924 | ath10k_pci_stop_intr(ar); | |
8c5c5368 MK |
1925 | err: |
1926 | return ret; | |
1927 | } | |
1928 | ||
1929 | static void ath10k_pci_hif_power_down(struct ath10k *ar) | |
1930 | { | |
8cc8df90 BM |
1931 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1932 | ||
32270b61 | 1933 | ath10k_pci_stop_intr(ar); |
8cc8df90 | 1934 | |
8c5c5368 | 1935 | ath10k_pci_ce_deinit(ar); |
8cc8df90 | 1936 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
8c5c5368 MK |
1937 | ath10k_do_pci_sleep(ar); |
1938 | } | |
1939 | ||
8cd13cad MK |
1940 | #ifdef CONFIG_PM |
1941 | ||
1942 | #define ATH10K_PCI_PM_CONTROL 0x44 | |
1943 | ||
1944 | static int ath10k_pci_hif_suspend(struct ath10k *ar) | |
1945 | { | |
1946 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
1947 | struct pci_dev *pdev = ar_pci->pdev; | |
1948 | u32 val; | |
1949 | ||
1950 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); | |
1951 | ||
1952 | if ((val & 0x000000ff) != 0x3) { | |
1953 | pci_save_state(pdev); | |
1954 | pci_disable_device(pdev); | |
1955 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, | |
1956 | (val & 0xffffff00) | 0x03); | |
1957 | } | |
1958 | ||
1959 | return 0; | |
1960 | } | |
1961 | ||
1962 | static int ath10k_pci_hif_resume(struct ath10k *ar) | |
1963 | { | |
1964 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
1965 | struct pci_dev *pdev = ar_pci->pdev; | |
1966 | u32 val; | |
1967 | ||
1968 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); | |
1969 | ||
1970 | if ((val & 0x000000ff) != 0) { | |
1971 | pci_restore_state(pdev); | |
1972 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, | |
1973 | val & 0xffffff00); | |
1974 | /* | |
1975 | * Suspend/Resume resets the PCI configuration space, | |
1976 | * so we have to re-disable the RETRY_TIMEOUT register (0x41) | |
1977 | * to keep PCI Tx retries from interfering with C3 CPU state | |
1978 | */ | |
1979 | pci_read_config_dword(pdev, 0x40, &val); | |
1980 | ||
1981 | if ((val & 0x0000ff00) != 0) | |
1982 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | |
1983 | } | |
1984 | ||
1985 | return 0; | |
1986 | } | |
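/*
 * ATH10K_PCI_PM_CONTROL (0x44) is the device's PCI power management
 * control/status register; its low bits select the D-state, so the
 * suspend path above writes 0x3 (D3hot) and resume writes 0x0 (D0).
 * A hedged sketch with named constants (the constants and helper are
 * illustrative; the driver uses raw masks above):
 */
#define ATH10K_PCI_PM_STATE_D0		0x0
#define ATH10K_PCI_PM_STATE_D3HOT	0x3
#define ATH10K_PCI_PM_STATE_MASK	0x000000ff

static void ath10k_pci_set_pm_state(struct pci_dev *pdev, u32 state)
{
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
	val = (val & ~ATH10K_PCI_PM_STATE_MASK) | state;
	pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, val);
}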
1987 | #endif | |
1988 | ||
5e3dd157 KV |
1989 | static const struct ath10k_hif_ops ath10k_pci_hif_ops = { |
1990 | .send_head = ath10k_pci_hif_send_head, | |
1991 | .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, | |
1992 | .start = ath10k_pci_hif_start, | |
1993 | .stop = ath10k_pci_hif_stop, | |
1994 | .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, | |
1995 | .get_default_pipe = ath10k_pci_hif_get_default_pipe, | |
1996 | .send_complete_check = ath10k_pci_hif_send_complete_check, | |
e799bbff | 1997 | .set_callbacks = ath10k_pci_hif_set_callbacks, |
5e3dd157 | 1998 | .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, |
8c5c5368 MK |
1999 | .power_up = ath10k_pci_hif_power_up, |
2000 | .power_down = ath10k_pci_hif_power_down, | |
8cd13cad MK |
2001 | #ifdef CONFIG_PM |
2002 | .suspend = ath10k_pci_hif_suspend, | |
2003 | .resume = ath10k_pci_hif_resume, | |
2004 | #endif | |
5e3dd157 KV |
2005 | }; |
2006 | ||
2007 | static void ath10k_pci_ce_tasklet(unsigned long ptr) | |
2008 | { | |
87263e5b | 2009 | struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; |
5e3dd157 KV |
2010 | struct ath10k_pci *ar_pci = pipe->ar_pci; |
2011 | ||
2012 | ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); | |
2013 | } | |
2014 | ||
2015 | static void ath10k_msi_err_tasklet(unsigned long data) | |
2016 | { | |
2017 | struct ath10k *ar = (struct ath10k *)data; | |
2018 | ||
2019 | ath10k_pci_fw_interrupt_handler(ar); | |
2020 | } | |
2021 | ||
2022 | /* | |
2023 | * Handler for a per-engine interrupt on a PARTICULAR CE. | |
2024 | * This is used in cases where each CE has a private MSI interrupt. | |
2025 | */ | |
2026 | static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) | |
2027 | { | |
2028 | struct ath10k *ar = arg; | |
2029 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2030 | int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; | |
2031 | ||
e5742672 | 2032 | if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { |
5e3dd157 KV |
2033 | ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); |
2034 | return IRQ_HANDLED; | |
2035 | } | |
2036 | ||
2037 | /* | |
2038 | * NOTE: We are able to derive ce_id from irq because we | |
2039 | * use a one-to-one mapping for CEs 0..5. | |
2040 | * CEs 6 & 7 do not use interrupts at all. | |
2041 | * | |
2042 | * This mapping must be kept in sync with the mapping | |
2043 | * used by firmware. | |
2044 | */ | |
2045 | tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); | |
2046 | return IRQ_HANDLED; | |
2047 | } | |
2048 | ||
2049 | static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) | |
2050 | { | |
2051 | struct ath10k *ar = arg; | |
2052 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2053 | ||
2054 | tasklet_schedule(&ar_pci->msi_fw_err); | |
2055 | return IRQ_HANDLED; | |
2056 | } | |
2057 | ||
2058 | /* | |
2059 | * Top-level interrupt handler for all PCI interrupts from a Target. | |
2060 | * When a block of MSI interrupts is allocated, this top-level handler | |
2061 | * is not used; instead, we directly call the correct sub-handler. | |
2062 | */ | |
2063 | static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) | |
2064 | { | |
2065 | struct ath10k *ar = arg; | |
2066 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2067 | ||
2068 | if (ar_pci->num_msi_intrs == 0) { | |
2069 | /* | |
2070 | * IMPORTANT: INTR_CLR register has to be set after | |
2071 | * INTR_ENABLE is set to 0, otherwise the interrupt cannot | |
2072 | * be cleared properly. | |
2073 | */ | |
2074 | iowrite32(0, ar_pci->mem + | |
2075 | (SOC_CORE_BASE_ADDRESS | | |
2076 | PCIE_INTR_ENABLE_ADDRESS)); | |
2077 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | |
2078 | PCIE_INTR_CE_MASK_ALL, | |
2079 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | |
2080 | PCIE_INTR_CLR_ADDRESS)); | |
2081 | /* | |
2082 | * IMPORTANT: this extra read transaction is required to | |
2083 | * flush the posted write buffer. | |
2084 | */ | |
2085 | (void) ioread32(ar_pci->mem + | |
2086 | (SOC_CORE_BASE_ADDRESS | | |
2087 | PCIE_INTR_ENABLE_ADDRESS)); | |
2088 | } | |
2089 | ||
2090 | tasklet_schedule(&ar_pci->intr_tq); | |
2091 | ||
2092 | return IRQ_HANDLED; | |
2093 | } | |
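/*
 * The extra ioread32() above is the standard way to flush MMIO posted
 * writes on PCIe: the read cannot complete until every preceding write
 * has reached the device. A tiny illustrative helper (hypothetical):
 */
static void ath10k_pci_write32_flushed(void __iomem *mem, u32 offset,
				       u32 value)
{
	iowrite32(value, mem + offset);

	/* Read back from the same region to flush the posted write */
	(void)ioread32(mem + offset);
}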
2094 | ||
2095 | static void ath10k_pci_tasklet(unsigned long data) | |
2096 | { | |
2097 | struct ath10k *ar = (struct ath10k *)data; | |
2098 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2099 | ||
2100 | ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ | |
2101 | ath10k_ce_per_engine_service_any(ar); | |
2102 | ||
2103 | if (ar_pci->num_msi_intrs == 0) { | |
2104 | /* Enable Legacy PCI line interrupts */ | |
2105 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | |
2106 | PCIE_INTR_CE_MASK_ALL, | |
2107 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | |
2108 | PCIE_INTR_ENABLE_ADDRESS)); | |
2109 | /* | |
2110 | * IMPORTANT: this extra read transaction is required to | |
2111 | * flush the posted write buffer | |
2112 | */ | |
2113 | (void) ioread32(ar_pci->mem + | |
2114 | (SOC_CORE_BASE_ADDRESS | | |
2115 | PCIE_INTR_ENABLE_ADDRESS)); | |
2116 | } | |
2117 | } | |
2118 | ||
2119 | static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) | |
2120 | { | |
2121 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2122 | int ret; | |
2123 | int i; | |
2124 | ||
2125 | ret = pci_enable_msi_block(ar_pci->pdev, num); | |
2126 | if (ret) | |
2127 | return ret; | |
2128 | ||
2129 | ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, | |
2130 | ath10k_pci_msi_fw_handler, | |
2131 | IRQF_SHARED, "ath10k_pci", ar); | |
591ecdb8 MK |
2132 | if (ret) { |
2133 | ath10k_warn("request_irq(%d) failed %d\n", | |
2134 | ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); | |
2135 | ||
2136 | pci_disable_msi(ar_pci->pdev); | |
5e3dd157 | 2137 | return ret; |
591ecdb8 | 2138 | } |
5e3dd157 KV |
2139 | |
2140 | for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { | |
2141 | ret = request_irq(ar_pci->pdev->irq + i, | |
2142 | ath10k_pci_per_engine_handler, | |
2143 | IRQF_SHARED, "ath10k_pci", ar); | |
2144 | if (ret) { | |
2145 | ath10k_warn("request_irq(%d) failed %d\n", | |
2146 | ar_pci->pdev->irq + i, ret); | |
2147 | ||
87b1423b MK |
2148 | for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) |
2149 | free_irq(ar_pci->pdev->irq + i, ar); | |
5e3dd157 | 2150 | |
87b1423b | 2151 | free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); |
5e3dd157 KV |
2152 | pci_disable_msi(ar_pci->pdev); |
2153 | return ret; | |
2154 | } | |
2155 | } | |
2156 | ||
2157 | ath10k_info("MSI-X interrupt handling (%d intrs)\n", num); | |
2158 | return 0; | |
2159 | } | |
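/*
 * Note the unwind loop above: if request_irq() fails for CE irq i, only
 * irqs [MSI_ASSIGN_CE_INITIAL, i) were requested, so exactly those are
 * freed before the FW irq is released and MSI is disabled. The generic
 * shape of the idiom (helper name is illustrative):
 */
static int ath10k_pci_request_irq_range(struct pci_dev *pdev, int first,
					int last, irq_handler_t handler,
					void *arg)
{
	int i, ret;

	for (i = first; i <= last; i++) {
		ret = request_irq(pdev->irq + i, handler, IRQF_SHARED,
				  "ath10k_pci", arg);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Free only the irqs that were successfully requested */
	for (i--; i >= first; i--)
		free_irq(pdev->irq + i, arg);

	return ret;
}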
2160 | ||
2161 | static int ath10k_pci_start_intr_msi(struct ath10k *ar) | |
2162 | { | |
2163 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2164 | int ret; | |
2165 | ||
2166 | ret = pci_enable_msi(ar_pci->pdev); | |
2167 | if (ret < 0) | |
2168 | return ret; | |
2169 | ||
2170 | ret = request_irq(ar_pci->pdev->irq, | |
2171 | ath10k_pci_interrupt_handler, | |
2172 | IRQF_SHARED, "ath10k_pci", ar); | |
2173 | if (ret < 0) { | |
2174 | pci_disable_msi(ar_pci->pdev); | |
2175 | return ret; | |
2176 | } | |
2177 | ||
2178 | ath10k_info("MSI interrupt handling\n"); | |
2179 | return 0; | |
2180 | } | |
2181 | ||
2182 | static int ath10k_pci_start_intr_legacy(struct ath10k *ar) | |
2183 | { | |
2184 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2185 | int ret; | |
2186 | ||
2187 | ret = request_irq(ar_pci->pdev->irq, | |
2188 | ath10k_pci_interrupt_handler, | |
2189 | IRQF_SHARED, "ath10k_pci", ar); | |
2190 | if (ret < 0) | |
2191 | return ret; | |
2192 | ||
2193 | /* | |
2194 | * Make sure to wake the Target before enabling Legacy | |
2195 | * Interrupt. | |
2196 | */ | |
2197 | iowrite32(PCIE_SOC_WAKE_V_MASK, | |
2198 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
2199 | PCIE_SOC_WAKE_ADDRESS); | |
2200 | ||
2201 | ath10k_pci_wait(ar); | |
2202 | ||
2203 | /* | |
2204 | * A potential race occurs here: the CORE_BASE write | |
2205 | * depends on the target correctly decoding the AXI address, | |
2206 | * but the host has no way to know when the target has | |
2207 | * written BAR to CORE_CTRL, so this write might get lost. | |
2208 | * For now, fix the race by repeating the write in the | |
2209 | * synchronization check below. | |
2210 | */ | |
2211 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | |
2212 | PCIE_INTR_CE_MASK_ALL, | |
2213 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | |
2214 | PCIE_INTR_ENABLE_ADDRESS)); | |
2215 | iowrite32(PCIE_SOC_WAKE_RESET, | |
2216 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
2217 | PCIE_SOC_WAKE_ADDRESS); | |
2218 | ||
2219 | ath10k_info("legacy interrupt handling\n"); | |
2220 | return 0; | |
2221 | } | |
2222 | ||
2223 | static int ath10k_pci_start_intr(struct ath10k *ar) | |
2224 | { | |
2225 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2226 | int num = MSI_NUM_REQUEST; | |
2227 | int ret; | |
2228 | int i; | |
2229 | ||
2230 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar); | |
2231 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, | |
2232 | (unsigned long) ar); | |
2233 | ||
2234 | for (i = 0; i < CE_COUNT; i++) { | |
2235 | ar_pci->pipe_info[i].ar_pci = ar_pci; | |
2236 | tasklet_init(&ar_pci->pipe_info[i].intr, | |
2237 | ath10k_pci_ce_tasklet, | |
2238 | (unsigned long)&ar_pci->pipe_info[i]); | |
2239 | } | |
2240 | ||
2241 | if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features)) | |
2242 | num = 1; | |
2243 | ||
2244 | if (num > 1) { | |
2245 | ret = ath10k_pci_start_intr_msix(ar, num); | |
2246 | if (ret == 0) | |
2247 | goto exit; | |
2248 | ||
2249 | ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret); | |
2250 | num = 1; | |
2251 | } | |
2252 | ||
2253 | if (num == 1) { | |
2254 | ret = ath10k_pci_start_intr_msi(ar); | |
2255 | if (ret == 0) | |
2256 | goto exit; | |
2257 | ||
2258 | ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n", | |
2259 | ret); | |
2260 | num = 0; | |
2261 | } | |
2262 | ||
2263 | ret = ath10k_pci_start_intr_legacy(ar); | |
2264 | ||
2265 | exit: | |
2266 | ar_pci->num_msi_intrs = num; | |
2267 | ar_pci->ce_count = CE_COUNT; | |
2268 | return ret; | |
2269 | } | |
2270 | ||
2271 | static void ath10k_pci_stop_intr(struct ath10k *ar) | |
2272 | { | |
2273 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2274 | int i; | |
2275 | ||
2276 | /* There's at least one interrupt regardless of whether it's | |
2277 | * legacy INTR or MSI or MSI-X */ | |
2278 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | |
2279 | free_irq(ar_pci->pdev->irq + i, ar); | |
2280 | ||
2281 | if (ar_pci->num_msi_intrs > 0) | |
2282 | pci_disable_msi(ar_pci->pdev); | |
2283 | } | |
2284 | ||
2285 | static int ath10k_pci_reset_target(struct ath10k *ar) | |
2286 | { | |
2287 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | |
2288 | int wait_limit = 300; /* 3 sec */ | |
2289 | ||
2290 | /* Wait for Target to finish initialization before we proceed. */ | |
2291 | iowrite32(PCIE_SOC_WAKE_V_MASK, | |
2292 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
2293 | PCIE_SOC_WAKE_ADDRESS); | |
2294 | ||
2295 | ath10k_pci_wait(ar); | |
2296 | ||
2297 | while (wait_limit-- && | |
2298 | !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & | |
2299 | FW_IND_INITIALIZED)) { | |
2300 | if (ar_pci->num_msi_intrs == 0) | |
2301 | /* Fix potential race by repeating CORE_BASE writes */ | |
2302 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | |
2303 | PCIE_INTR_CE_MASK_ALL, | |
2304 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | |
2305 | PCIE_INTR_ENABLE_ADDRESS)); | |
2306 | mdelay(10); | |
2307 | } | |
2308 | ||
2309 | if (wait_limit < 0) { | |
2310 | ath10k_err("Target stalled\n"); | |
2311 | iowrite32(PCIE_SOC_WAKE_RESET, | |
2312 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
2313 | PCIE_SOC_WAKE_ADDRESS); | |
2314 | return -EIO; | |
2315 | } | |
2316 | ||
2317 | iowrite32(PCIE_SOC_WAKE_RESET, | |
2318 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | |
2319 | PCIE_SOC_WAKE_ADDRESS); | |
2320 | ||
2321 | return 0; | |
2322 | } | |
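/*
 * The init wait above is a bounded poll: test a status bit, sleep, and
 * give up after a fixed number of iterations. The same loop in
 * isolation (helper name is illustrative):
 */
static int ath10k_pci_poll_fw_initialized(void __iomem *mem)
{
	int wait_limit = 300; /* ~3 sec at 10 ms per iteration */

	while (wait_limit--) {
		if (ioread32(mem + FW_INDICATOR_ADDRESS) &
		    FW_IND_INITIALIZED)
			return 0;

		mdelay(10);
	}

	return -ETIMEDOUT;
}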
2323 | ||
7a5fe3f8 | 2324 | static void ath10k_pci_device_reset(struct ath10k *ar) |
5e3dd157 | 2325 | { |
7a5fe3f8 | 2326 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
5e3dd157 KV |
2327 | void __iomem *mem = ar_pci->mem; |
2328 | int i; | |
2329 | u32 val; | |
2330 | ||
2331 | if (!SOC_GLOBAL_RESET_ADDRESS) | |
2332 | return; | |
2333 | ||
2334 | if (!mem) | |
2335 | return; | |
2336 | ||
2337 | ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, | |
2338 | PCIE_SOC_WAKE_V_MASK); | |
2339 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | |
2340 | if (ath10k_pci_target_is_awake(ar)) | |
2341 | break; | |
2342 | msleep(1); | |
2343 | } | |
2344 | ||
2345 | /* Put Target, including PCIe, into RESET. */ | |
2346 | val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); | |
2347 | val |= 1; | |
2348 | ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); | |
2349 | ||
2350 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | |
2351 | if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & | |
2352 | RTC_STATE_COLD_RESET_MASK) | |
2353 | break; | |
2354 | msleep(1); | |
2355 | } | |
2356 | ||
2357 | /* Pull Target, including PCIe, out of RESET. */ | |
2358 | val &= ~1; | |
2359 | ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); | |
2360 | ||
2361 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | |
2362 | if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & | |
2363 | RTC_STATE_COLD_RESET_MASK)) | |
2364 | break; | |
2365 | msleep(1); | |
2366 | } | |
2367 | ||
2368 | ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); | |
2369 | } | |
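/*
 * The reset above is a full pulse: assert the reset bit, poll RTC_STATE
 * until the SoC reports cold reset, deassert, then poll until it comes
 * back out. One poll step, factored for illustration (hypothetical
 * helper reusing the register accessors above):
 */
static bool ath10k_pci_wait_cold_reset_state(void __iomem *mem, bool entered)
{
	int i;

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		bool in_reset =
			!!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
			   RTC_STATE_COLD_RESET_MASK);

		if (in_reset == entered)
			return true;

		msleep(1);
	}

	return false;
}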
2370 | ||
2371 | static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) | |
2372 | { | |
2373 | int i; | |
2374 | ||
2375 | for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { | |
2376 | if (!test_bit(i, ar_pci->features)) | |
2377 | continue; | |
2378 | ||
2379 | switch (i) { | |
2380 | case ATH10K_PCI_FEATURE_MSI_X: | |
2381 | ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); | |
2382 | break; | |
8cc8df90 BM |
2383 | case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: |
2384 | ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n"); | |
2385 | break; | |
5e3dd157 KV |
2386 | } |
2387 | } | |
2388 | } | |
2389 | ||
2390 | static int ath10k_pci_probe(struct pci_dev *pdev, | |
2391 | const struct pci_device_id *pci_dev) | |
2392 | { | |
2393 | void __iomem *mem; | |
2394 | int ret = 0; | |
2395 | struct ath10k *ar; | |
2396 | struct ath10k_pci *ar_pci; | |
2397 | u32 lcr_val; | |
2398 | ||
2399 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | |
2400 | ||
2401 | ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); | |
2402 | if (ar_pci == NULL) | |
2403 | return -ENOMEM; | |
2404 | ||
2405 | ar_pci->pdev = pdev; | |
2406 | ar_pci->dev = &pdev->dev; | |
2407 | ||
2408 | switch (pci_dev->device) { | |
5e3dd157 KV |
2409 | case QCA988X_2_0_DEVICE_ID: |
2410 | set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); | |
2411 | break; | |
2412 | default: | |
2413 | ret = -ENODEV; | |
2414 | ath10k_err("Unkown device ID: %d\n", pci_dev->device); | |
2415 | goto err_ar_pci; | |
2416 | } | |
2417 | ||
8cc8df90 BM |
2418 | if (ath10k_target_ps) |
2419 | set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); | |
2420 | ||
5e3dd157 KV |
2421 | ath10k_pci_dump_features(ar_pci); |
2422 | ||
3a0861ff | 2423 | ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); |
5e3dd157 KV |
2424 | if (!ar) { |
2425 | ath10k_err("ath10k_core_create failed!\n"); | |
2426 | ret = -EINVAL; | |
2427 | goto err_ar_pci; | |
2428 | } | |
2429 | ||
5e3dd157 KV |
2430 | ar_pci->ar = ar; |
2431 | ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; | |
2432 | atomic_set(&ar_pci->keep_awake_count, 0); | |
2433 | ||
2434 | pci_set_drvdata(pdev, ar); | |
2435 | ||
2436 | /* | |
2437 | * Without any knowledge of the Host, the Target may have been reset or | |
2438 | * power cycled and its Config Space may no longer reflect the PCI | |
2439 | * address space that was assigned earlier by the PCI infrastructure. | |
2440 | * Refresh it now. | |
2441 | */ | |
2442 | ret = pci_assign_resource(pdev, BAR_NUM); | |
2443 | if (ret) { | |
2444 | ath10k_err("cannot assign PCI space: %d\n", ret); | |
2445 | goto err_ar; | |
2446 | } | |
2447 | ||
2448 | ret = pci_enable_device(pdev); | |
2449 | if (ret) { | |
2450 | ath10k_err("cannot enable PCI device: %d\n", ret); | |
2451 | goto err_ar; | |
2452 | } | |
2453 | ||
2454 | /* Request MMIO resources */ | |
2455 | ret = pci_request_region(pdev, BAR_NUM, "ath"); | |
2456 | if (ret) { | |
2457 | ath10k_err("PCI MMIO reservation error: %d\n", ret); | |
2458 | goto err_device; | |
2459 | } | |
2460 | ||
2461 | /* | |
2462 | * Target structures have a limit of 32 bit DMA pointers. | |
2463 | * DMA pointers can be wider than 32 bits by default on some systems. | |
2464 | */ | |
2465 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2466 | if (ret) { | |
2467 | ath10k_err("32-bit DMA not available: %d\n", ret); | |
2468 | goto err_region; | |
2469 | } | |
2470 | ||
2471 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2472 | if (ret) { | |
2473 | ath10k_err("cannot enable 32-bit consistent DMA\n"); | |
2474 | goto err_region; | |
2475 | } | |
2476 | ||
2477 | /* Set bus master bit in PCI_COMMAND to enable DMA */ | |
2478 | pci_set_master(pdev); | |
2479 | ||
2480 | /* | |
2481 | * Temporary FIX: disable ASPM | |
2482 | * Will be removed after the OTP is programmed | |
2483 | */ | |
2484 | pci_read_config_dword(pdev, 0x80, &lcr_val); | |
2485 | pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); | |
2486 | ||
2487 | /* Arrange for access to Target SoC registers. */ | |
2488 | mem = pci_iomap(pdev, BAR_NUM, 0); | |
2489 | if (!mem) { | |
2490 | ath10k_err("PCI iomap error\n"); | |
2491 | ret = -EIO; | |
2492 | goto err_master; | |
2493 | } | |
2494 | ||
2495 | ar_pci->mem = mem; | |
2496 | ||
2497 | spin_lock_init(&ar_pci->ce_lock); | |
2498 | ||
5e3dd157 KV |
2499 | ret = ath10k_core_register(ar); |
2500 | if (ret) { | |
2501 | ath10k_err("could not register driver core (%d)\n", ret); | |
32270b61 | 2502 | goto err_iomap; |
5e3dd157 KV |
2503 | } |
2504 | ||
2505 | return 0; | |
2506 | ||
5e3dd157 KV |
2507 | err_iomap: |
2508 | pci_iounmap(pdev, mem); | |
2509 | err_master: | |
2510 | pci_clear_master(pdev); | |
2511 | err_region: | |
2512 | pci_release_region(pdev, BAR_NUM); | |
2513 | err_device: | |
2514 | pci_disable_device(pdev); | |
2515 | err_ar: | |
2516 | pci_set_drvdata(pdev, NULL); | |
2517 | ath10k_core_destroy(ar); | |
2518 | err_ar_pci: | |
2519 | /* call HIF PCI free here */ | |
2520 | kfree(ar_pci); | |
2521 | ||
2522 | return ret; | |
2523 | } | |
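/*
 * A side note on the DMA mask setup in the probe above: on kernels that
 * provide it, the streaming and coherent 32-bit masks can be set in a
 * single call (availability depends on kernel version; this wrapper is
 * illustrative only):
 */
static int ath10k_pci_set_dma_mask_example(struct pci_dev *pdev)
{
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}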
2524 | ||
2525 | static void ath10k_pci_remove(struct pci_dev *pdev) | |
2526 | { | |
2527 | struct ath10k *ar = pci_get_drvdata(pdev); | |
2528 | struct ath10k_pci *ar_pci; | |
2529 | ||
2530 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | |
2531 | ||
2532 | if (!ar) | |
2533 | return; | |
2534 | ||
2535 | ar_pci = ath10k_pci_priv(ar); | |
2536 | ||
2537 | if (!ar_pci) | |
2538 | return; | |
2539 | ||
2540 | tasklet_kill(&ar_pci->msi_fw_err); | |
2541 | ||
2542 | ath10k_core_unregister(ar); | |
5e3dd157 KV |
2543 | |
2544 | pci_set_drvdata(pdev, NULL); | |
2545 | pci_iounmap(pdev, ar_pci->mem); | |
2546 | pci_release_region(pdev, BAR_NUM); | |
2547 | pci_clear_master(pdev); | |
2548 | pci_disable_device(pdev); | |
2549 | ||
2550 | ath10k_core_destroy(ar); | |
2551 | kfree(ar_pci); | |
2552 | } | |
2553 | ||
5e3dd157 KV |
2554 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); |
2555 | ||
2556 | static struct pci_driver ath10k_pci_driver = { | |
2557 | .name = "ath10k_pci", | |
2558 | .id_table = ath10k_pci_id_table, | |
2559 | .probe = ath10k_pci_probe, | |
2560 | .remove = ath10k_pci_remove, | |
5e3dd157 KV |
2561 | }; |
2562 | ||
2563 | static int __init ath10k_pci_init(void) | |
2564 | { | |
2565 | int ret; | |
2566 | ||
2567 | ret = pci_register_driver(&ath10k_pci_driver); | |
2568 | if (ret) | |
2569 | ath10k_err("pci_register_driver failed [%d]\n", ret); | |
2570 | ||
2571 | return ret; | |
2572 | } | |
2573 | module_init(ath10k_pci_init); | |
2574 | ||
2575 | static void __exit ath10k_pci_exit(void) | |
2576 | { | |
2577 | pci_unregister_driver(&ath10k_pci_driver); | |
2578 | } | |
2579 | ||
2580 | module_exit(ath10k_pci_exit); | |
2581 | ||
2582 | MODULE_AUTHOR("Qualcomm Atheros"); | |
2583 | MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); | |
2584 | MODULE_LICENSE("Dual BSD/GPL"); | |
5e3dd157 KV |
2585 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); |
2586 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); | |
2587 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); |