ath10k: decouple pci start/stop logic
drivers/net/wireless/ath/ath10k/pci.c
1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22
23 #include "core.h"
24 #include "debug.h"
25
26 #include "targaddrs.h"
27 #include "bmi.h"
28
29 #include "hif.h"
30 #include "htc.h"
31
32 #include "ce.h"
33 #include "pci.h"
34
35 unsigned int ath10k_target_ps;
36 module_param(ath10k_target_ps, uint, 0644);
37 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38
39 #define QCA988X_1_0_DEVICE_ID (0xabcd)
40 #define QCA988X_2_0_DEVICE_ID (0x003c)
41
42 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45 {0}
46 };
47
48 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
49 u32 *data);
50
51 static void ath10k_pci_process_ce(struct ath10k *ar);
52 static int ath10k_pci_post_rx(struct ath10k *ar);
53 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54 int num);
55 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56 static void ath10k_pci_stop_ce(struct ath10k *ar);
57 static void ath10k_pci_device_reset(struct ath10k *ar);
58 static int ath10k_pci_reset_target(struct ath10k *ar);
59
60 static const struct ce_attr host_ce_config_wlan[] = {
61 /* host->target HTC control and raw streams */
62 { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
63 /* could be moved to share CE3 */
64 /* target->host HTT + HTC control */
65 { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
66 /* target->host WMI */
67 { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
68 /* host->target WMI */
69 { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
70 /* host->target HTT */
71 { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
72 CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
73 /* unused */
74 { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
75 /* Target autonomous hif_memcpy */
76 { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
77 /* ce_diag, the Diagnostic Window */
78 { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
79 };
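/*
 * Each entry above describes the host view of one copy engine: its
 * attribute flags and the source/destination ring sizes that are used when
 * the CE is brought up in ath10k_pci_ce_init() via ath10k_ce_init(). See
 * struct ce_attr in ce.h for the authoritative field layout of these
 * positional initializers.
 */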
80
81 /* Target firmware's Copy Engine configuration. */
82 static const struct ce_pipe_config target_ce_config_wlan[] = {
83 /* host->target HTC control and raw streams */
84 { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
85 /* target->host HTT + HTC control */
86 { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
87 /* target->host WMI */
88 { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
89 /* host->target WMI */
90 { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
91 /* host->target HTT */
92 { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
93 /* NB: 50% of src nentries, since tx has 2 frags */
94 /* unused */
95 { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
96 /* Reserved for target autonomous hif_memcpy */
97 { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
98 /* CE7 used only by Host */
99 };
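/*
 * This target-side table (struct ce_pipe_config entries) is not used
 * directly by the host. It is downloaded to the firmware through the
 * diagnostic window in ath10k_pci_init_config(), via the
 * ath10k_pci_diag_write_mem() call against pipe_cfg_addr below.
 */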
100
101 /*
102 * Diagnostic read/write access is provided for startup/config/debug usage.
103 * Caller must guarantee proper alignment, when applicable, and single user
104 * at any moment.
105 */
106 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
107 int nbytes)
108 {
109 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
110 int ret = 0;
111 u32 buf;
112 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
113 unsigned int id;
114 unsigned int flags;
115 struct ce_state *ce_diag;
116 /* Host buffer address in CE space */
117 u32 ce_data;
118 dma_addr_t ce_data_base = 0;
119 void *data_buf = NULL;
120 int i;
121
122 /*
123 * This code cannot handle reads to non-memory space. Redirect to the
124 * register read function, but preserve this function's
125 * multi-word read capability.
126 */
127 if (address < DRAM_BASE_ADDRESS) {
128 if (!IS_ALIGNED(address, 4) ||
129 !IS_ALIGNED((unsigned long)data, 4))
130 return -EIO;
131
132 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
133 ar, address, (u32 *)data)) == 0)) {
134 nbytes -= sizeof(u32);
135 address += sizeof(u32);
136 data += sizeof(u32);
137 }
138 return ret;
139 }
140
141 ce_diag = ar_pci->ce_diag;
142
143 /*
144 * Allocate a temporary bounce buffer to hold caller's data
145 * to be DMA'ed from Target. This guarantees
146 * 1) 4-byte alignment
147 * 2) Buffer in DMA-able space
148 */
149 orig_nbytes = nbytes;
150 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
151 orig_nbytes,
152 &ce_data_base);
153
154 if (!data_buf) {
155 ret = -ENOMEM;
156 goto done;
157 }
158 memset(data_buf, 0, orig_nbytes);
159
160 remaining_bytes = orig_nbytes;
161 ce_data = ce_data_base;
162 while (remaining_bytes) {
163 nbytes = min_t(unsigned int, remaining_bytes,
164 DIAG_TRANSFER_LIMIT);
165
166 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
167 if (ret != 0)
168 goto done;
169
170 /* Request CE to send from Target(!) address to Host buffer */
171 /*
172 * The address supplied by the caller is in the
173 * Target CPU virtual address space.
174 *
175 * In order to use this address with the diagnostic CE,
176 * convert it from Target CPU virtual address space
177 * to CE address space
178 */
179 ath10k_pci_wake(ar);
180 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
181 address);
182 ath10k_pci_sleep(ar);
183
184 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
185 0);
186 if (ret)
187 goto done;
188
189 i = 0;
190 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
191 &completed_nbytes,
192 &id) != 0) {
193 mdelay(1);
194 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
195 ret = -EBUSY;
196 goto done;
197 }
198 }
199
200 if (nbytes != completed_nbytes) {
201 ret = -EIO;
202 goto done;
203 }
204
205 if (buf != (u32) address) {
206 ret = -EIO;
207 goto done;
208 }
209
210 i = 0;
211 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
212 &completed_nbytes,
213 &id, &flags) != 0) {
214 mdelay(1);
215
216 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
217 ret = -EBUSY;
218 goto done;
219 }
220 }
221
222 if (nbytes != completed_nbytes) {
223 ret = -EIO;
224 goto done;
225 }
226
227 if (buf != ce_data) {
228 ret = -EIO;
229 goto done;
230 }
231
232 remaining_bytes -= nbytes;
233 address += nbytes;
234 ce_data += nbytes;
235 }
236
237 done:
238 if (ret == 0) {
239 /* Copy data from allocated DMA buf to caller's buf */
240 WARN_ON_ONCE(orig_nbytes & 3);
241 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
242 ((u32 *)data)[i] =
243 __le32_to_cpu(((__le32 *)data_buf)[i]);
244 }
245 } else
246 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
247 __func__, address);
248
249 if (data_buf)
250 pci_free_consistent(ar_pci->pdev, orig_nbytes,
251 data_buf, ce_data_base);
252
253 return ret;
254 }
255
256 /* Read 4-byte aligned data from Target memory or register */
257 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
258 u32 *data)
259 {
260 /* Assume range doesn't cross this boundary */
261 if (address >= DRAM_BASE_ADDRESS)
262 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
263
264 ath10k_pci_wake(ar);
265 *data = ath10k_pci_read32(ar, address);
266 ath10k_pci_sleep(ar);
267 return 0;
268 }
269
270 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
271 const void *data, int nbytes)
272 {
273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
274 int ret = 0;
275 u32 buf;
276 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
277 unsigned int id;
278 unsigned int flags;
279 struct ce_state *ce_diag;
280 void *data_buf = NULL;
281 u32 ce_data; /* Host buffer address in CE space */
282 dma_addr_t ce_data_base = 0;
283 int i;
284
285 ce_diag = ar_pci->ce_diag;
286
287 /*
288 * Allocate a temporary bounce buffer to hold caller's data
289 * to be DMA'ed to Target. This guarantees
290 * 1) 4-byte alignment
291 * 2) Buffer in DMA-able space
292 */
293 orig_nbytes = nbytes;
294 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
295 orig_nbytes,
296 &ce_data_base);
297 if (!data_buf) {
298 ret = -ENOMEM;
299 goto done;
300 }
301
302 /* Copy caller's data to allocated DMA buf */
303 WARN_ON_ONCE(orig_nbytes & 3);
304 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
305 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
306
307 /*
308 * The address supplied by the caller is in the
309 * Target CPU virtual address space.
310 *
311 * In order to use this address with the diagnostic CE,
312 * convert it from
313 * Target CPU virtual address space
314 * to
315 * CE address space
316 */
317 ath10k_pci_wake(ar);
318 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
319 ath10k_pci_sleep(ar);
320
321 remaining_bytes = orig_nbytes;
322 ce_data = ce_data_base;
323 while (remaining_bytes) {
324 /* FIXME: check cast */
325 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
326
327 /* Set up to receive directly into Target(!) address */
328 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
329 if (ret != 0)
330 goto done;
331
332 /*
333 * Request CE to send caller-supplied data that
334 * was copied to bounce buffer to Target(!) address.
335 */
336 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
337 nbytes, 0, 0);
338 if (ret != 0)
339 goto done;
340
341 i = 0;
342 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
343 &completed_nbytes,
344 &id) != 0) {
345 mdelay(1);
346
347 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
348 ret = -EBUSY;
349 goto done;
350 }
351 }
352
353 if (nbytes != completed_nbytes) {
354 ret = -EIO;
355 goto done;
356 }
357
358 if (buf != ce_data) {
359 ret = -EIO;
360 goto done;
361 }
362
363 i = 0;
364 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
365 &completed_nbytes,
366 &id, &flags) != 0) {
367 mdelay(1);
368
369 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
370 ret = -EBUSY;
371 goto done;
372 }
373 }
374
375 if (nbytes != completed_nbytes) {
376 ret = -EIO;
377 goto done;
378 }
379
380 if (buf != address) {
381 ret = -EIO;
382 goto done;
383 }
384
385 remaining_bytes -= nbytes;
386 address += nbytes;
387 ce_data += nbytes;
388 }
389
390 done:
391 if (data_buf) {
392 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
393 ce_data_base);
394 }
395
396 if (ret != 0)
397 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
398 address);
399
400 return ret;
401 }
402
403 /* Write 4B data to Target memory or register */
404 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
405 u32 data)
406 {
407 /* Assume range doesn't cross this boundary */
408 if (address >= DRAM_BASE_ADDRESS)
409 return ath10k_pci_diag_write_mem(ar, address, &data,
410 sizeof(u32));
411
412 ath10k_pci_wake(ar);
413 ath10k_pci_write32(ar, address, data);
414 ath10k_pci_sleep(ar);
415 return 0;
416 }
417
418 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
419 {
420 void __iomem *mem = ath10k_pci_priv(ar)->mem;
421 u32 val;
422 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
423 RTC_STATE_ADDRESS);
424 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
425 }
426
427 static void ath10k_pci_wait(struct ath10k *ar)
428 {
429 int n = 100;
430
431 while (n-- && !ath10k_pci_target_is_awake(ar))
432 msleep(10);
433
434 if (n < 0)
435 ath10k_warn("Unable to wake up target\n");
436 }
437
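/*
 * ath10k_do_pci_wake()/ath10k_do_pci_sleep() implement a simple refcount
 * (keep_awake_count) around the PCIE_SOC_WAKE register: the first waker
 * forces the SoC awake and polls RTC_STATE until it reports ON (or until
 * PCIE_WAKE_TIMEOUT expires), and the last sleeper clears the wake request
 * so the target may enter power save again.
 */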
438 void ath10k_do_pci_wake(struct ath10k *ar)
439 {
440 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
441 void __iomem *pci_addr = ar_pci->mem;
442 int tot_delay = 0;
443 int curr_delay = 5;
444
445 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
446 /* Force AWAKE */
447 iowrite32(PCIE_SOC_WAKE_V_MASK,
448 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
449 PCIE_SOC_WAKE_ADDRESS);
450 }
451 atomic_inc(&ar_pci->keep_awake_count);
452
453 if (ar_pci->verified_awake)
454 return;
455
456 for (;;) {
457 if (ath10k_pci_target_is_awake(ar)) {
458 ar_pci->verified_awake = true;
459 break;
460 }
461
462 if (tot_delay > PCIE_WAKE_TIMEOUT) {
463 ath10k_warn("target takes too long to wake up (awake count %d)\n",
464 atomic_read(&ar_pci->keep_awake_count));
465 break;
466 }
467
468 udelay(curr_delay);
469 tot_delay += curr_delay;
470
471 if (curr_delay < 50)
472 curr_delay += 5;
473 }
474 }
475
476 void ath10k_do_pci_sleep(struct ath10k *ar)
477 {
478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
479 void __iomem *pci_addr = ar_pci->mem;
480
481 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
482 /* Allow sleep */
483 ar_pci->verified_awake = false;
484 iowrite32(PCIE_SOC_WAKE_RESET,
485 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
486 PCIE_SOC_WAKE_ADDRESS);
487 }
488 }
489
490 /*
491 * FIXME: Handle OOM properly.
492 */
493 static inline
494 struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
495 {
496 struct ath10k_pci_compl *compl = NULL;
497
498 spin_lock_bh(&pipe_info->pipe_lock);
499 if (list_empty(&pipe_info->compl_free)) {
500 ath10k_warn("Completion buffers are full\n");
501 goto exit;
502 }
503 compl = list_first_entry(&pipe_info->compl_free,
504 struct ath10k_pci_compl, list);
505 list_del(&compl->list);
506 exit:
507 spin_unlock_bh(&pipe_info->pipe_lock);
508 return compl;
509 }
510
511 /* Called by lower (CE) layer when a send to Target completes. */
512 static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
513 void *transfer_context,
514 u32 ce_data,
515 unsigned int nbytes,
516 unsigned int transfer_id)
517 {
518 struct ath10k *ar = ce_state->ar;
519 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
520 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
521 struct ath10k_pci_compl *compl;
522 bool process = false;
523
524 do {
525 /*
526 * For the send completion of an item in sendlist, just
527 * increment num_sends_allowed. The upper layer callback will
528 * be triggered when last fragment is done with send.
529 */
530 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
531 spin_lock_bh(&pipe_info->pipe_lock);
532 pipe_info->num_sends_allowed++;
533 spin_unlock_bh(&pipe_info->pipe_lock);
534 continue;
535 }
536
537 compl = get_free_compl(pipe_info);
538 if (!compl)
539 break;
540
541 compl->send_or_recv = HIF_CE_COMPLETE_SEND;
542 compl->ce_state = ce_state;
543 compl->pipe_info = pipe_info;
544 compl->transfer_context = transfer_context;
545 compl->nbytes = nbytes;
546 compl->transfer_id = transfer_id;
547 compl->flags = 0;
548
549 /*
550 * Add the completion to the processing queue.
551 */
552 spin_lock_bh(&ar_pci->compl_lock);
553 list_add_tail(&compl->list, &ar_pci->compl_process);
554 spin_unlock_bh(&ar_pci->compl_lock);
555
556 process = true;
557 } while (ath10k_ce_completed_send_next(ce_state,
558 &transfer_context,
559 &ce_data, &nbytes,
560 &transfer_id) == 0);
561
562 /*
563 * If only some of the items within a sendlist have completed,
564 * don't invoke completion processing until the entire sendlist
565 * has been sent.
566 */
567 if (!process)
568 return;
569
570 ath10k_pci_process_ce(ar);
571 }
572
573 /* Called by lower (CE) layer when data is received from the Target. */
574 static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
575 void *transfer_context, u32 ce_data,
576 unsigned int nbytes,
577 unsigned int transfer_id,
578 unsigned int flags)
579 {
580 struct ath10k *ar = ce_state->ar;
581 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
582 struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
583 struct ath10k_pci_compl *compl;
584 struct sk_buff *skb;
585
586 do {
587 compl = get_free_compl(pipe_info);
588 if (!compl)
589 break;
590
591 compl->send_or_recv = HIF_CE_COMPLETE_RECV;
592 compl->ce_state = ce_state;
593 compl->pipe_info = pipe_info;
594 compl->transfer_context = transfer_context;
595 compl->nbytes = nbytes;
596 compl->transfer_id = transfer_id;
597 compl->flags = flags;
598
599 skb = transfer_context;
600 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
601 skb->len + skb_tailroom(skb),
602 DMA_FROM_DEVICE);
603 /*
604 * Add the completion to the processing queue.
605 */
606 spin_lock_bh(&ar_pci->compl_lock);
607 list_add_tail(&compl->list, &ar_pci->compl_process);
608 spin_unlock_bh(&ar_pci->compl_lock);
609
610 } while (ath10k_ce_completed_recv_next(ce_state,
611 &transfer_context,
612 &ce_data, &nbytes,
613 &transfer_id,
614 &flags) == 0);
615
616 ath10k_pci_process_ce(ar);
617 }
618
619 /* Send the first nbytes bytes of the buffer */
620 static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
621 unsigned int transfer_id,
622 unsigned int bytes, struct sk_buff *nbuf)
623 {
624 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
625 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
626 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
627 struct ce_state *ce_hdl = pipe_info->ce_hdl;
628 struct ce_sendlist sendlist;
629 unsigned int len;
630 u32 flags = 0;
631 int ret;
632
633 memset(&sendlist, 0, sizeof(struct ce_sendlist));
634
635 len = min(bytes, nbuf->len);
636 bytes -= len;
637
638 if (len & 3)
639 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
640
641 ath10k_dbg(ATH10K_DBG_PCI,
642 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
643 nbuf->data, (unsigned long long) skb_cb->paddr,
644 nbuf->len, len);
645 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
646 "ath10k tx: data: ",
647 nbuf->data, nbuf->len);
648
649 ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
650
651 /* Make sure we have resources to handle this request */
652 spin_lock_bh(&pipe_info->pipe_lock);
653 if (!pipe_info->num_sends_allowed) {
654 ath10k_warn("Pipe: %d is full\n", pipe_id);
655 spin_unlock_bh(&pipe_info->pipe_lock);
656 return -ENOSR;
657 }
658 pipe_info->num_sends_allowed--;
659 spin_unlock_bh(&pipe_info->pipe_lock);
660
661 ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
662 if (ret)
663 ath10k_warn("CE send failed: %p\n", nbuf);
664
665 return ret;
666 }
667
668 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
669 {
670 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
671 struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
672 int ret;
673
674 spin_lock_bh(&pipe_info->pipe_lock);
675 ret = pipe_info->num_sends_allowed;
676 spin_unlock_bh(&pipe_info->pipe_lock);
677
678 return ret;
679 }
680
681 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
682 {
683 u32 reg_dump_area = 0;
684 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
685 u32 host_addr;
686 int ret;
687 u32 i;
688
689 ath10k_err("firmware crashed!\n");
690 ath10k_err("hardware name %s version 0x%x\n",
691 ar->hw_params.name, ar->target_version);
692 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
693 ar->fw_version_minor, ar->fw_version_release,
694 ar->fw_version_build);
695
696 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
697 if (ath10k_pci_diag_read_mem(ar, host_addr,
698 &reg_dump_area, sizeof(u32)) != 0) {
699 ath10k_warn("could not read hi_failure_state\n");
700 return;
701 }
702
703 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
704
705 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
706 &reg_dump_values[0],
707 REG_DUMP_COUNT_QCA988X * sizeof(u32));
708 if (ret != 0) {
709 ath10k_err("could not dump FW Dump Area\n");
710 return;
711 }
712
713 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
714
715 ath10k_err("target Register Dump\n");
716 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
717 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
718 i,
719 reg_dump_values[i],
720 reg_dump_values[i + 1],
721 reg_dump_values[i + 2],
722 reg_dump_values[i + 3]);
723 }
724
725 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
726 int force)
727 {
728 if (!force) {
729 int resources;
730 /*
731 * Decide whether to actually poll for completions, or just
732 * wait for a later chance.
733 * If there seem to be plenty of resources left, then just wait
734 * since checking involves reading a CE register, which is a
735 * relatively expensive operation.
736 */
737 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
738
739 /*
740 * If at least 50% of the total resources are still available,
741 * don't bother checking again yet.
742 */
743 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
744 return;
745 }
746 ath10k_ce_per_engine_service(ar, pipe);
747 }
748
749 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
750 struct ath10k_hif_cb *callbacks)
751 {
752 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
753
754 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
755
756 memcpy(&ar_pci->msg_callbacks_current, callbacks,
757 sizeof(ar_pci->msg_callbacks_current));
758 }
759
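/*
 * Prepare every copy engine pipe for use by the HIF layer: register send
 * and/or receive completion callbacks according to host_ce_config_wlan and
 * pre-allocate one ath10k_pci_compl per source/destination ring entry. The
 * diagnostic CE is skipped here; it is driven synchronously by the diag
 * read/write helpers above.
 */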
760 static int ath10k_pci_start_ce(struct ath10k *ar)
761 {
762 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
763 struct ce_state *ce_diag = ar_pci->ce_diag;
764 const struct ce_attr *attr;
765 struct hif_ce_pipe_info *pipe_info;
766 struct ath10k_pci_compl *compl;
767 int i, pipe_num, completions, disable_interrupts;
768
769 spin_lock_init(&ar_pci->compl_lock);
770 INIT_LIST_HEAD(&ar_pci->compl_process);
771
772 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
773 pipe_info = &ar_pci->pipe_info[pipe_num];
774
775 spin_lock_init(&pipe_info->pipe_lock);
776 INIT_LIST_HEAD(&pipe_info->compl_free);
777
778 /* Handle Diagnostic CE specially */
779 if (pipe_info->ce_hdl == ce_diag)
780 continue;
781
782 attr = &host_ce_config_wlan[pipe_num];
783 completions = 0;
784
785 if (attr->src_nentries) {
786 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
787 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
788 ath10k_pci_ce_send_done,
789 disable_interrupts);
790 completions += attr->src_nentries;
791 pipe_info->num_sends_allowed = attr->src_nentries - 1;
792 }
793
794 if (attr->dest_nentries) {
795 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
796 ath10k_pci_ce_recv_data);
797 completions += attr->dest_nentries;
798 }
799
800 if (completions == 0)
801 continue;
802
803 for (i = 0; i < completions; i++) {
804 compl = kmalloc(sizeof(struct ath10k_pci_compl),
805 GFP_KERNEL);
806 if (!compl) {
807 ath10k_warn("No memory for completion state\n");
808 ath10k_pci_stop_ce(ar);
809 return -ENOMEM;
810 }
811
812 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
813 list_add_tail(&compl->list, &pipe_info->compl_free);
814 }
815 }
816
817 return 0;
818 }
819
820 static void ath10k_pci_stop_ce(struct ath10k *ar)
821 {
822 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
823 struct ath10k_pci_compl *compl;
824 struct sk_buff *skb;
825 int i;
826
827 ath10k_ce_disable_interrupts(ar);
828
829 /* Cancel the pending tasklet */
830 tasklet_kill(&ar_pci->intr_tq);
831
832 for (i = 0; i < CE_COUNT; i++)
833 tasklet_kill(&ar_pci->pipe_info[i].intr);
834
835 /* Mark pending completions as aborted, so that upper layers free up
836 * their associated resources */
837 spin_lock_bh(&ar_pci->compl_lock);
838 list_for_each_entry(compl, &ar_pci->compl_process, list) {
839 skb = (struct sk_buff *)compl->transfer_context;
840 ATH10K_SKB_CB(skb)->is_aborted = true;
841 }
842 spin_unlock_bh(&ar_pci->compl_lock);
843 }
844
845 static void ath10k_pci_cleanup_ce(struct ath10k *ar)
846 {
847 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
848 struct ath10k_pci_compl *compl, *tmp;
849 struct hif_ce_pipe_info *pipe_info;
850 struct sk_buff *netbuf;
851 int pipe_num;
852
853 /* Free pending completions. */
854 spin_lock_bh(&ar_pci->compl_lock);
855 if (!list_empty(&ar_pci->compl_process))
856 ath10k_warn("pending completions still present! possible memory leaks.\n");
857
858 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
859 list_del(&compl->list);
860 netbuf = (struct sk_buff *)compl->transfer_context;
861 dev_kfree_skb_any(netbuf);
862 kfree(compl);
863 }
864 spin_unlock_bh(&ar_pci->compl_lock);
865
866 /* Free unused completions for each pipe. */
867 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
868 pipe_info = &ar_pci->pipe_info[pipe_num];
869
870 spin_lock_bh(&pipe_info->pipe_lock);
871 list_for_each_entry_safe(compl, tmp,
872 &pipe_info->compl_free, list) {
873 list_del(&compl->list);
874 kfree(compl);
875 }
876 spin_unlock_bh(&pipe_info->pipe_lock);
877 }
878 }
879
880 static void ath10k_pci_process_ce(struct ath10k *ar)
881 {
882 struct ath10k_pci *ar_pci = ar->hif.priv;
883 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
884 struct ath10k_pci_compl *compl;
885 struct sk_buff *skb;
886 unsigned int nbytes;
887 int ret, send_done = 0;
888
889 /* Upper layers aren't ready to handle tx/rx completions in parallel so
890 * we must serialize all completion processing. */
891
892 spin_lock_bh(&ar_pci->compl_lock);
893 if (ar_pci->compl_processing) {
894 spin_unlock_bh(&ar_pci->compl_lock);
895 return;
896 }
897 ar_pci->compl_processing = true;
898 spin_unlock_bh(&ar_pci->compl_lock);
899
900 for (;;) {
901 spin_lock_bh(&ar_pci->compl_lock);
902 if (list_empty(&ar_pci->compl_process)) {
903 spin_unlock_bh(&ar_pci->compl_lock);
904 break;
905 }
906 compl = list_first_entry(&ar_pci->compl_process,
907 struct ath10k_pci_compl, list);
908 list_del(&compl->list);
909 spin_unlock_bh(&ar_pci->compl_lock);
910
911 if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
912 cb->tx_completion(ar,
913 compl->transfer_context,
914 compl->transfer_id);
915 send_done = 1;
916 } else {
917 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
918 if (ret) {
919 ath10k_warn("Unable to post recv buffer for pipe: %d\n",
920 compl->pipe_info->pipe_num);
921 break;
922 }
923
924 skb = (struct sk_buff *)compl->transfer_context;
925 nbytes = compl->nbytes;
926
927 ath10k_dbg(ATH10K_DBG_PCI,
928 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
929 skb, nbytes);
930 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
931 "ath10k rx: ", skb->data, nbytes);
932
933 if (skb->len + skb_tailroom(skb) >= nbytes) {
934 skb_trim(skb, 0);
935 skb_put(skb, nbytes);
936 cb->rx_completion(ar, skb,
937 compl->pipe_info->pipe_num);
938 } else {
939 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
940 nbytes,
941 skb->len + skb_tailroom(skb));
942 }
943 }
944
945 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
946
947 /*
948 * Add completion back to the pipe's free list.
949 */
950 spin_lock_bh(&compl->pipe_info->pipe_lock);
951 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
952 compl->pipe_info->num_sends_allowed += send_done;
953 spin_unlock_bh(&compl->pipe_info->pipe_lock);
954 }
955
956 spin_lock_bh(&ar_pci->compl_lock);
957 ar_pci->compl_processing = false;
958 spin_unlock_bh(&ar_pci->compl_lock);
959 }
960
961 /* TODO - temporary mapping while we have too few CE's */
962 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
963 u16 service_id, u8 *ul_pipe,
964 u8 *dl_pipe, int *ul_is_polled,
965 int *dl_is_polled)
966 {
967 int ret = 0;
968
969 /* polling for received messages not supported */
970 *dl_is_polled = 0;
971
972 switch (service_id) {
973 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
974 /*
975 * Host->target HTT gets its own pipe, so it can be polled
976 * while other pipes are interrupt driven.
977 */
978 *ul_pipe = 4;
979 /*
980 * Use the same target->host pipe for HTC ctrl, HTC raw
981 * streams, and HTT.
982 */
983 *dl_pipe = 1;
984 break;
985
986 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
987 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
988 /*
989 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
990 * HTC_CTRL_RSVD_SVC could share the same pipe as the
991 * WMI services. So, if another CE is needed, change
992 * this to *ul_pipe = 3, which frees up CE 0.
993 */
994 /* *ul_pipe = 3; */
995 *ul_pipe = 0;
996 *dl_pipe = 1;
997 break;
998
999 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1000 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1001 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1002 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1003
1004 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1005 *ul_pipe = 3;
1006 *dl_pipe = 2;
1007 break;
1008
1009 /* pipe 5 unused */
1010 /* pipe 6 reserved */
1011 /* pipe 7 reserved */
1012
1013 default:
1014 ret = -1;
1015 break;
1016 }
1017 *ul_is_polled =
1018 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1019
1020 return ret;
1021 }
1022
1023 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1024 u8 *ul_pipe, u8 *dl_pipe)
1025 {
1026 int ul_is_polled, dl_is_polled;
1027
1028 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1029 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1030 ul_pipe,
1031 dl_pipe,
1032 &ul_is_polled,
1033 &dl_is_polled);
1034 }
1035
1036 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
1037 int num)
1038 {
1039 struct ath10k *ar = pipe_info->hif_ce_state;
1040 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1041 struct ce_state *ce_state = pipe_info->ce_hdl;
1042 struct sk_buff *skb;
1043 dma_addr_t ce_data;
1044 int i, ret = 0;
1045
1046 if (pipe_info->buf_sz == 0)
1047 return 0;
1048
1049 for (i = 0; i < num; i++) {
1050 skb = dev_alloc_skb(pipe_info->buf_sz);
1051 if (!skb) {
1052 ath10k_warn("could not allocate skbuff for pipe %d\n",
1053 pipe_info->pipe_num);
1054 ret = -ENOMEM;
1055 goto err;
1056 }
1057
1058 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1059
1060 ce_data = dma_map_single(ar->dev, skb->data,
1061 skb->len + skb_tailroom(skb),
1062 DMA_FROM_DEVICE);
1063
1064 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1065 ath10k_warn("could not dma map skbuff\n");
1066 dev_kfree_skb_any(skb);
1067 ret = -EIO;
1068 goto err;
1069 }
1070
1071 ATH10K_SKB_CB(skb)->paddr = ce_data;
1072
1073 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1074 pipe_info->buf_sz,
1075 PCI_DMA_FROMDEVICE);
1076
1077 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1078 ce_data);
1079 if (ret) {
1080 ath10k_warn("could not enqueue to pipe %d (%d)\n",
1081 pipe_info->pipe_num, ret);
1082 goto err;
1083 }
1084 }
1085
1086 return ret;
1087
1088 err:
1089 ath10k_pci_rx_pipe_cleanup(pipe_info);
1090 return ret;
1091 }
1092
1093 static int ath10k_pci_post_rx(struct ath10k *ar)
1094 {
1095 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1096 struct hif_ce_pipe_info *pipe_info;
1097 const struct ce_attr *attr;
1098 int pipe_num, ret = 0;
1099
1100 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1101 pipe_info = &ar_pci->pipe_info[pipe_num];
1102 attr = &host_ce_config_wlan[pipe_num];
1103
1104 if (attr->dest_nentries == 0)
1105 continue;
1106
1107 ret = ath10k_pci_post_rx_pipe(pipe_info,
1108 attr->dest_nentries - 1);
1109 if (ret) {
1110 ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1111 pipe_num);
1112
1113 for (; pipe_num >= 0; pipe_num--) {
1114 pipe_info = &ar_pci->pipe_info[pipe_num];
1115 ath10k_pci_rx_pipe_cleanup(pipe_info);
1116 }
1117 return ret;
1118 }
1119 }
1120
1121 return 0;
1122 }
1123
1124 static int ath10k_pci_hif_start(struct ath10k *ar)
1125 {
1126 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1127 int ret;
1128
1129 ret = ath10k_pci_start_ce(ar);
1130 if (ret) {
1131 ath10k_warn("could not start CE (%d)\n", ret);
1132 return ret;
1133 }
1134
1135 /* Post buffers once to start things off. */
1136 ret = ath10k_pci_post_rx(ar);
1137 if (ret) {
1138 ath10k_warn("could not post rx pipes (%d)\n", ret);
1139 return ret;
1140 }
1141
1142 ar_pci->started = 1;
1143 return 0;
1144 }
1145
1146 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1147 {
1148 struct ath10k *ar;
1149 struct ath10k_pci *ar_pci;
1150 struct ce_state *ce_hdl;
1151 u32 buf_sz;
1152 struct sk_buff *netbuf;
1153 u32 ce_data;
1154
1155 buf_sz = pipe_info->buf_sz;
1156
1157 /* Unused Copy Engine */
1158 if (buf_sz == 0)
1159 return;
1160
1161 ar = pipe_info->hif_ce_state;
1162 ar_pci = ath10k_pci_priv(ar);
1163
1164 if (!ar_pci->started)
1165 return;
1166
1167 ce_hdl = pipe_info->ce_hdl;
1168
1169 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1170 &ce_data) == 0) {
1171 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1172 netbuf->len + skb_tailroom(netbuf),
1173 DMA_FROM_DEVICE);
1174 dev_kfree_skb_any(netbuf);
1175 }
1176 }
1177
1178 static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1179 {
1180 struct ath10k *ar;
1181 struct ath10k_pci *ar_pci;
1182 struct ce_state *ce_hdl;
1183 struct sk_buff *netbuf;
1184 u32 ce_data;
1185 unsigned int nbytes;
1186 unsigned int id;
1187 u32 buf_sz;
1188
1189 buf_sz = pipe_info->buf_sz;
1190
1191 /* Unused Copy Engine */
1192 if (buf_sz == 0)
1193 return;
1194
1195 ar = pipe_info->hif_ce_state;
1196 ar_pci = ath10k_pci_priv(ar);
1197
1198 if (!ar_pci->started)
1199 return;
1200
1201 ce_hdl = pipe_info->ce_hdl;
1202
1203 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1204 &ce_data, &nbytes, &id) == 0) {
1205 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1206 /*
1207 * Indicate the completion to the higher layer to
1208 * free the buffer.
1209 */
1210 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1211 ar_pci->msg_callbacks_current.tx_completion(ar,
1212 netbuf,
1213 id);
}
1214 }
1215 }
1216
1217 /*
1218 * Cleanup residual buffers for device shutdown:
1219 * buffers that were enqueued for receive
1220 * buffers that were to be sent
1221 * Note: Buffers that had completed but which were
1222 * not yet processed are on a completion queue. They
1223 * are handled when the completion thread shuts down.
1224 */
1225 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1226 {
1227 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1228 int pipe_num;
1229
1230 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1231 struct hif_ce_pipe_info *pipe_info;
1232
1233 pipe_info = &ar_pci->pipe_info[pipe_num];
1234 ath10k_pci_rx_pipe_cleanup(pipe_info);
1235 ath10k_pci_tx_pipe_cleanup(pipe_info);
1236 }
1237 }
1238
1239 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1240 {
1241 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1242 struct hif_ce_pipe_info *pipe_info;
1243 int pipe_num;
1244
1245 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1246 pipe_info = &ar_pci->pipe_info[pipe_num];
1247 if (pipe_info->ce_hdl) {
1248 ath10k_ce_deinit(pipe_info->ce_hdl);
1249 pipe_info->ce_hdl = NULL;
1250 pipe_info->buf_sz = 0;
1251 }
1252 }
1253 }
1254
1255 static void ath10k_pci_hif_stop(struct ath10k *ar)
1256 {
1257 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1258
1259 ath10k_pci_stop_ce(ar);
1260
1261 /* At this point, asynchronous threads are stopped, the target should
1262 * not DMA nor interrupt. We process the leftovers and then free
1263 * everything else up. */
1264
1265 ath10k_pci_process_ce(ar);
1266 ath10k_pci_cleanup_ce(ar);
1267 ath10k_pci_buffer_cleanup(ar);
1268 }
1269
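/*
 * BMI request/response exchange over the copy engines: the request is
 * bounced through a DMA-mapped copy, an optional response buffer is posted
 * on the BMI RX pipe first, and then the request is sent on the BMI TX
 * pipe. Completion is signalled from the BMI send/recv callbacks below and
 * waited for with a BMI_COMMUNICATION_TIMEOUT_HZ timeout.
 */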
1270 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1271 void *req, u32 req_len,
1272 void *resp, u32 *resp_len)
1273 {
1274 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1275 struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1276 struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1277 dma_addr_t req_paddr = 0;
1278 dma_addr_t resp_paddr = 0;
1279 struct bmi_xfer xfer = {};
1280 void *treq, *tresp = NULL;
1281 int ret = 0;
1282
1283 if (resp && !resp_len)
1284 return -EINVAL;
1285
1286 if (resp && resp_len && *resp_len == 0)
1287 return -EINVAL;
1288
1289 treq = kmemdup(req, req_len, GFP_KERNEL);
1290 if (!treq)
1291 return -ENOMEM;
1292
1293 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1294 ret = dma_mapping_error(ar->dev, req_paddr);
1295 if (ret)
1296 goto err_dma;
1297
1298 if (resp && resp_len) {
1299 tresp = kzalloc(*resp_len, GFP_KERNEL);
1300 if (!tresp) {
1301 ret = -ENOMEM;
1302 goto err_req;
1303 }
1304
1305 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1306 DMA_FROM_DEVICE);
1307 ret = dma_mapping_error(ar->dev, resp_paddr);
1308 if (ret)
1309 goto err_req;
1310
1311 xfer.wait_for_resp = true;
1312 xfer.resp_len = 0;
1313
1314 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1315 }
1316
1317 init_completion(&xfer.done);
1318
1319 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1320 if (ret)
1321 goto err_resp;
1322
1323 ret = wait_for_completion_timeout(&xfer.done,
1324 BMI_COMMUNICATION_TIMEOUT_HZ);
1325 if (ret <= 0) {
1326 u32 unused_buffer;
1327 unsigned int unused_nbytes;
1328 unsigned int unused_id;
1329
1330 ret = -ETIMEDOUT;
1331 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1332 &unused_nbytes, &unused_id);
1333 } else {
1334 /* non-zero means we did not time out */
1335 ret = 0;
1336 }
1337
1338 err_resp:
1339 if (resp) {
1340 u32 unused_buffer;
1341
1342 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1343 dma_unmap_single(ar->dev, resp_paddr,
1344 *resp_len, DMA_FROM_DEVICE);
1345 }
1346 err_req:
1347 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1348
1349 if (ret == 0 && resp_len) {
1350 *resp_len = min(*resp_len, xfer.resp_len);
1351 memcpy(resp, tresp, xfer.resp_len);
1352 }
1353 err_dma:
1354 kfree(treq);
1355 kfree(tresp);
1356
1357 return ret;
1358 }
1359
1360 static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1361 void *transfer_context,
1362 u32 data,
1363 unsigned int nbytes,
1364 unsigned int transfer_id)
1365 {
1366 struct bmi_xfer *xfer = transfer_context;
1367
1368 if (xfer->wait_for_resp)
1369 return;
1370
1371 complete(&xfer->done);
1372 }
1373
1374 static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1375 void *transfer_context,
1376 u32 data,
1377 unsigned int nbytes,
1378 unsigned int transfer_id,
1379 unsigned int flags)
1380 {
1381 struct bmi_xfer *xfer = transfer_context;
1382
1383 if (!xfer->wait_for_resp) {
1384 ath10k_warn("unexpected: BMI data received; ignoring\n");
1385 return;
1386 }
1387
1388 xfer->resp_len = nbytes;
1389 complete(&xfer->done);
1390 }
1391
1392 /*
1393 * Map from service/endpoint to Copy Engine.
1394 * This table is derived from the CE_PCI TABLE, above.
1395 * It is passed to the Target at startup for use by firmware.
1396 */
1397 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1398 {
1399 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1400 PIPEDIR_OUT, /* out = UL = host -> target */
1401 3,
1402 },
1403 {
1404 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1405 PIPEDIR_IN, /* in = DL = target -> host */
1406 2,
1407 },
1408 {
1409 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1410 PIPEDIR_OUT, /* out = UL = host -> target */
1411 3,
1412 },
1413 {
1414 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1415 PIPEDIR_IN, /* in = DL = target -> host */
1416 2,
1417 },
1418 {
1419 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1420 PIPEDIR_OUT, /* out = UL = host -> target */
1421 3,
1422 },
1423 {
1424 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1425 PIPEDIR_IN, /* in = DL = target -> host */
1426 2,
1427 },
1428 {
1429 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1430 PIPEDIR_OUT, /* out = UL = host -> target */
1431 3,
1432 },
1433 {
1434 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1435 PIPEDIR_IN, /* in = DL = target -> host */
1436 2,
1437 },
1438 {
1439 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1440 PIPEDIR_OUT, /* out = UL = host -> target */
1441 3,
1442 },
1443 {
1444 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1445 PIPEDIR_IN, /* in = DL = target -> host */
1446 2,
1447 },
1448 {
1449 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1450 PIPEDIR_OUT, /* out = UL = host -> target */
1451 0, /* could be moved to 3 (share with WMI) */
1452 },
1453 {
1454 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1455 PIPEDIR_IN, /* in = DL = target -> host */
1456 1,
1457 },
1458 {
1459 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1460 PIPEDIR_OUT, /* out = UL = host -> target */
1461 0,
1462 },
1463 {
1464 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1465 PIPEDIR_IN, /* in = DL = target -> host */
1466 1,
1467 },
1468 {
1469 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1470 PIPEDIR_OUT, /* out = UL = host -> target */
1471 4,
1472 },
1473 {
1474 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1475 PIPEDIR_IN, /* in = DL = target -> host */
1476 1,
1477 },
1478
1479 /* (Additions here) */
1480
1481 { /* Must be last */
1482 0,
1483 0,
1484 0,
1485 },
1486 };
1487
1488 /*
1489 * Send an interrupt to the device to wake up the Target CPU
1490 * so it has an opportunity to notice any changed state.
1491 */
1492 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1493 {
1494 int ret;
1495 u32 core_ctrl;
1496
1497 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1498 CORE_CTRL_ADDRESS,
1499 &core_ctrl);
1500 if (ret) {
1501 ath10k_warn("Unable to read core ctrl\n");
1502 return ret;
1503 }
1504
1505 /* A_INUM_FIRMWARE interrupt to Target CPU */
1506 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1507
1508 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1509 CORE_CTRL_ADDRESS,
1510 core_ctrl);
1511 if (ret)
1512 ath10k_warn("Unable to set interrupt mask\n");
1513
1514 return ret;
1515 }
1516
1517 static int ath10k_pci_init_config(struct ath10k *ar)
1518 {
1519 u32 interconnect_targ_addr;
1520 u32 pcie_state_targ_addr = 0;
1521 u32 pipe_cfg_targ_addr = 0;
1522 u32 svc_to_pipe_map = 0;
1523 u32 pcie_config_flags = 0;
1524 u32 ealloc_value;
1525 u32 ealloc_targ_addr;
1526 u32 flag2_value;
1527 u32 flag2_targ_addr;
1528 int ret = 0;
1529
1530 /* Download to Target the CE Config and the service-to-CE map */
1531 interconnect_targ_addr =
1532 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1533
1534 /* Supply Target-side CE configuration */
1535 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1536 &pcie_state_targ_addr);
1537 if (ret != 0) {
1538 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1539 return ret;
1540 }
1541
1542 if (pcie_state_targ_addr == 0) {
1543 ret = -EIO;
1544 ath10k_err("Invalid pcie state addr\n");
1545 return ret;
1546 }
1547
1548 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1549 offsetof(struct pcie_state,
1550 pipe_cfg_addr),
1551 &pipe_cfg_targ_addr);
1552 if (ret != 0) {
1553 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1554 return ret;
1555 }
1556
1557 if (pipe_cfg_targ_addr == 0) {
1558 ret = -EIO;
1559 ath10k_err("Invalid pipe cfg addr\n");
1560 return ret;
1561 }
1562
1563 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1564 target_ce_config_wlan,
1565 sizeof(target_ce_config_wlan));
1566
1567 if (ret != 0) {
1568 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1569 return ret;
1570 }
1571
1572 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1573 offsetof(struct pcie_state,
1574 svc_to_pipe_map),
1575 &svc_to_pipe_map);
1576 if (ret != 0) {
1577 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1578 return ret;
1579 }
1580
1581 if (svc_to_pipe_map == 0) {
1582 ret = -EIO;
1583 ath10k_err("Invalid svc_to_pipe map\n");
1584 return ret;
1585 }
1586
1587 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1588 target_service_to_ce_map_wlan,
1589 sizeof(target_service_to_ce_map_wlan));
1590 if (ret != 0) {
1591 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1592 return ret;
1593 }
1594
1595 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1596 offsetof(struct pcie_state,
1597 config_flags),
1598 &pcie_config_flags);
1599 if (ret != 0) {
1600 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1601 return ret;
1602 }
1603
1604 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1605
1606 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1607 offsetof(struct pcie_state, config_flags),
1608 &pcie_config_flags,
1609 sizeof(pcie_config_flags));
1610 if (ret != 0) {
1611 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1612 return ret;
1613 }
1614
1615 /* configure early allocation */
1616 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1617
1618 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1619 if (ret != 0) {
1620 ath10k_err("Faile to get early alloc val: %d\n", ret);
1621 return ret;
1622 }
1623
1624 /* first bank is switched to IRAM */
1625 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1626 HI_EARLY_ALLOC_MAGIC_MASK);
1627 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1628 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1629
1630 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1631 if (ret != 0) {
1632 ath10k_err("Failed to set early alloc val: %d\n", ret);
1633 return ret;
1634 }
1635
1636 /* Tell Target to proceed with initialization */
1637 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1638
1639 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1640 if (ret != 0) {
1641 ath10k_err("Failed to get option val: %d\n", ret);
1642 return ret;
1643 }
1644
1645 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1646
1647 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1648 if (ret != 0) {
1649 ath10k_err("Failed to set option val: %d\n", ret);
1650 return ret;
1651 }
1652
1653 return 0;
1654 }
1655
1656
1657
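/*
 * Allocate and initialize all host copy engines. The last CE is reserved
 * for the diagnostic window, and the BMI TX/RX pipes initially get
 * BMI-specific completion handlers which are replaced with the generic
 * ones once the BMI phase is over (see ath10k_pci_start_ce()).
 */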
1658 static int ath10k_pci_ce_init(struct ath10k *ar)
1659 {
1660 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1661 struct hif_ce_pipe_info *pipe_info;
1662 const struct ce_attr *attr;
1663 int pipe_num;
1664
1665 for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1666 pipe_info = &ar_pci->pipe_info[pipe_num];
1667 pipe_info->pipe_num = pipe_num;
1668 pipe_info->hif_ce_state = ar;
1669 attr = &host_ce_config_wlan[pipe_num];
1670
1671 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1672 if (pipe_info->ce_hdl == NULL) {
1673 ath10k_err("Unable to initialize CE for pipe: %d\n",
1674 pipe_num);
1675
1676 /* It is safe to call it here. It checks if ce_hdl is
1677 * valid for each pipe */
1678 ath10k_pci_ce_deinit(ar);
1679 return -1;
1680 }
1681
1682 if (pipe_num == ar_pci->ce_count - 1) {
1683 /*
1684 * Reserve the last CE for
1685 * diagnostic Window support
1686 */
1687 ar_pci->ce_diag =
1688 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1689 continue;
1690 }
1691
1692 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1693 }
1694
1695 /*
1696 * Initially, establish CE completion handlers for use with BMI.
1697 * These are overwritten with generic handlers after we exit BMI phase.
1698 */
1699 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1700 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1701 ath10k_pci_bmi_send_done, 0);
1702
1703 pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1704 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1705 ath10k_pci_bmi_recv_data);
1706
1707 return 0;
1708 }
1709
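/*
 * Check the firmware indicator register. FW_IND_EVENT_PENDING signals a
 * firmware event (typically a crash); acknowledge it and, if the HIF has
 * already been started, dump the target register area for debugging.
 */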
1710 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1711 {
1712 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1713 u32 fw_indicator_address, fw_indicator;
1714
1715 ath10k_pci_wake(ar);
1716
1717 fw_indicator_address = ar_pci->fw_indicator_address;
1718 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1719
1720 if (fw_indicator & FW_IND_EVENT_PENDING) {
1721 /* ACK: clear Target-side pending event */
1722 ath10k_pci_write32(ar, fw_indicator_address,
1723 fw_indicator & ~FW_IND_EVENT_PENDING);
1724
1725 if (ar_pci->started) {
1726 ath10k_pci_hif_dump_area(ar);
1727 } else {
1728 /*
1729 * Probable Target failure before we're prepared
1730 * to handle it. Generally unexpected.
1731 */
1732 ath10k_warn("early firmware event indicated\n");
1733 }
1734 }
1735
1736 ath10k_pci_sleep(ar);
1737 }
1738
1739 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1740 {
1741 int ret;
1742
1743 /*
1744 * Bring the target up cleanly.
1745 *
1746 * The target may be in an undefined state with an AUX-powered Target
1747 * and a Host in WoW mode. If the Host crashes, loses power, or is
1748 * restarted (without unloading the driver) then the Target is left
1749 * (aux) powered and running. On a subsequent driver load, the Target
1750 * is in an unexpected state. We try to catch that here in order to
1751 * reset the Target and retry the probe.
1752 */
1753 ath10k_pci_device_reset(ar);
1754
1755 ret = ath10k_pci_reset_target(ar);
1756 if (ret)
1757 goto err;
1758
1759 if (ath10k_target_ps) {
1760 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
1761 } else {
1762 /* Force AWAKE forever */
1763 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
1764 ath10k_do_pci_wake(ar);
1765 }
1766
1767 ret = ath10k_pci_ce_init(ar);
1768 if (ret)
1769 goto err_ps;
1770
1771 ret = ath10k_pci_init_config(ar);
1772 if (ret)
1773 goto err_ce;
1774
1775 ret = ath10k_pci_wake_target_cpu(ar);
1776 if (ret) {
1777 ath10k_err("could not wake up target CPU (%d)\n", ret);
1778 goto err_ce;
1779 }
1780
1781 return 0;
1782
1783 err_ce:
1784 ath10k_pci_ce_deinit(ar);
1785 err_ps:
1786 if (!ath10k_target_ps)
1787 ath10k_do_pci_sleep(ar);
1788 err:
1789 return ret;
1790 }
1791
1792 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1793 {
1794 ath10k_pci_ce_deinit(ar);
1795 if (!ath10k_target_ps)
1796 ath10k_do_pci_sleep(ar);
1797 }
1798
1799 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1800 .send_head = ath10k_pci_hif_send_head,
1801 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1802 .start = ath10k_pci_hif_start,
1803 .stop = ath10k_pci_hif_stop,
1804 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1805 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1806 .send_complete_check = ath10k_pci_hif_send_complete_check,
1807 .set_callbacks = ath10k_pci_hif_set_callbacks,
1808 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
1809 .power_up = ath10k_pci_hif_power_up,
1810 .power_down = ath10k_pci_hif_power_down,
1811 };
1812
1813 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1814 {
1815 struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1816 struct ath10k_pci *ar_pci = pipe->ar_pci;
1817
1818 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1819 }
1820
1821 static void ath10k_msi_err_tasklet(unsigned long data)
1822 {
1823 struct ath10k *ar = (struct ath10k *)data;
1824
1825 ath10k_pci_fw_interrupt_handler(ar);
1826 }
1827
1828 /*
1829 * Handler for a per-engine interrupt on a PARTICULAR CE.
1830 * This is used in cases where each CE has a private MSI interrupt.
1831 */
1832 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1833 {
1834 struct ath10k *ar = arg;
1835 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1836 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1837
1838 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1839 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1840 return IRQ_HANDLED;
1841 }
1842
1843 /*
1844 * NOTE: We are able to derive ce_id from irq because we
1845 * use a one-to-one mapping for CE's 0..5.
1846 * CE's 6 & 7 do not use interrupts at all.
1847 *
1848 * This mapping must be kept in sync with the mapping
1849 * used by firmware.
1850 */
1851 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1852 return IRQ_HANDLED;
1853 }
1854
1855 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1856 {
1857 struct ath10k *ar = arg;
1858 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1859
1860 tasklet_schedule(&ar_pci->msi_fw_err);
1861 return IRQ_HANDLED;
1862 }
1863
1864 /*
1865 * Top-level interrupt handler for all PCI interrupts from a Target.
1866 * When a block of MSI interrupts is allocated, this top-level handler
1867 * is not used; instead, we directly call the correct sub-handler.
1868 */
1869 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1870 {
1871 struct ath10k *ar = arg;
1872 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1873
1874 if (ar_pci->num_msi_intrs == 0) {
1875 /*
1876 * IMPORTANT: the INTR_CLR register has to be written after
1877 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
1878 * really be cleared.
1879 */
1880 iowrite32(0, ar_pci->mem +
1881 (SOC_CORE_BASE_ADDRESS |
1882 PCIE_INTR_ENABLE_ADDRESS));
1883 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1884 PCIE_INTR_CE_MASK_ALL,
1885 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1886 PCIE_INTR_CLR_ADDRESS));
1887 /*
1888 * IMPORTANT: this extra read transaction is required to
1889 * flush the posted write buffer.
1890 */
1891 (void) ioread32(ar_pci->mem +
1892 (SOC_CORE_BASE_ADDRESS |
1893 PCIE_INTR_ENABLE_ADDRESS));
1894 }
1895
1896 tasklet_schedule(&ar_pci->intr_tq);
1897
1898 return IRQ_HANDLED;
1899 }
1900
1901 static void ath10k_pci_tasklet(unsigned long data)
1902 {
1903 struct ath10k *ar = (struct ath10k *)data;
1904 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1905
1906 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1907 ath10k_ce_per_engine_service_any(ar);
1908
1909 if (ar_pci->num_msi_intrs == 0) {
1910 /* Enable Legacy PCI line interrupts */
1911 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1912 PCIE_INTR_CE_MASK_ALL,
1913 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1914 PCIE_INTR_ENABLE_ADDRESS));
1915 /*
1916 * IMPORTANT: this extra read transaction is required to
1917 * flush the posted write buffer
1918 */
1919 (void) ioread32(ar_pci->mem +
1920 (SOC_CORE_BASE_ADDRESS |
1921 PCIE_INTR_ENABLE_ADDRESS));
1922 }
1923 }
1924
1925 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1926 {
1927 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1928 int ret;
1929 int i;
1930
1931 ret = pci_enable_msi_block(ar_pci->pdev, num);
1932 if (ret)
1933 return ret;
1934
1935 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1936 ath10k_pci_msi_fw_handler,
1937 IRQF_SHARED, "ath10k_pci", ar);
1938 if (ret)
1939 return ret;
1940
1941 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1942 ret = request_irq(ar_pci->pdev->irq + i,
1943 ath10k_pci_per_engine_handler,
1944 IRQF_SHARED, "ath10k_pci", ar);
1945 if (ret) {
1946 ath10k_warn("request_irq(%d) failed %d\n",
1947 ar_pci->pdev->irq + i, ret);
1948
1949 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
1950 free_irq(ar_pci->pdev->irq + i, ar);
1951
1952 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
1953 pci_disable_msi(ar_pci->pdev);
1954 return ret;
1955 }
1956 }
1957
1958 ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
1959 return 0;
1960 }
1961
1962 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
1963 {
1964 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1965 int ret;
1966
1967 ret = pci_enable_msi(ar_pci->pdev);
1968 if (ret < 0)
1969 return ret;
1970
1971 ret = request_irq(ar_pci->pdev->irq,
1972 ath10k_pci_interrupt_handler,
1973 IRQF_SHARED, "ath10k_pci", ar);
1974 if (ret < 0) {
1975 pci_disable_msi(ar_pci->pdev);
1976 return ret;
1977 }
1978
1979 ath10k_info("MSI interrupt handling\n");
1980 return 0;
1981 }
1982
1983 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
1984 {
1985 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986 int ret;
1987
1988 ret = request_irq(ar_pci->pdev->irq,
1989 ath10k_pci_interrupt_handler,
1990 IRQF_SHARED, "ath10k_pci", ar);
1991 if (ret < 0)
1992 return ret;
1993
1994 /*
1995 * Make sure to wake the Target before enabling Legacy
1996 * Interrupt.
1997 */
1998 iowrite32(PCIE_SOC_WAKE_V_MASK,
1999 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2000 PCIE_SOC_WAKE_ADDRESS);
2001
2002 ath10k_pci_wait(ar);
2003
2004 /*
2005 * A potential race occurs here: The CORE_BASE write
2006 * depends on target correctly decoding AXI address but
2007 * host won't know when target writes BAR to CORE_CTRL.
2008 * This write might get lost if the target has not yet written BAR.
2009 * For now, fix the race by repeating the write in the
2010 * synchronization check below.
2011 */
2012 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2013 PCIE_INTR_CE_MASK_ALL,
2014 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2015 PCIE_INTR_ENABLE_ADDRESS));
2016 iowrite32(PCIE_SOC_WAKE_RESET,
2017 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2018 PCIE_SOC_WAKE_ADDRESS);
2019
2020 ath10k_info("legacy interrupt handling\n");
2021 return 0;
2022 }
2023
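/*
 * Interrupt setup tries MSI ranges first and degrades gracefully: a block
 * of MSI_NUM_REQUEST MSI vectors (labelled MSI-X here), then a single MSI,
 * then shared legacy INTx. num_msi_intrs records the mode that was
 * actually brought up (0 means legacy).
 */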
2024 static int ath10k_pci_start_intr(struct ath10k *ar)
2025 {
2026 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2027 int num = MSI_NUM_REQUEST;
2028 int ret;
2029 int i;
2030
2031 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2032 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2033 (unsigned long) ar);
2034
2035 for (i = 0; i < CE_COUNT; i++) {
2036 ar_pci->pipe_info[i].ar_pci = ar_pci;
2037 tasklet_init(&ar_pci->pipe_info[i].intr,
2038 ath10k_pci_ce_tasklet,
2039 (unsigned long)&ar_pci->pipe_info[i]);
2040 }
2041
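/* Prefer multi-vector MSI, then single MSI, then legacy line
 * interrupts, falling back at each step if setup fails. */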
2042 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2043 num = 1;
2044
2045 if (num > 1) {
2046 ret = ath10k_pci_start_intr_msix(ar, num);
2047 if (ret == 0)
2048 goto exit;
2049
2050 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2051 num = 1;
2052 }
2053
2054 if (num == 1) {
2055 ret = ath10k_pci_start_intr_msi(ar);
2056 if (ret == 0)
2057 goto exit;
2058
2059 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2060 ret);
2061 num = 0;
2062 }
2063
2064 ret = ath10k_pci_start_intr_legacy(ar);
2065
2066 exit:
2067 ar_pci->num_msi_intrs = num;
2068 ar_pci->ce_count = CE_COUNT;
2069 return ret;
2070 }
2071
2072 static void ath10k_pci_stop_intr(struct ath10k *ar)
2073 {
2074 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2075 int i;
2076
2077 /* There's at least one interrupt regardless of whether it's legacy
2078 * INTR, MSI or MSI-X */
2079 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2080 free_irq(ar_pci->pdev->irq + i, ar);
2081
2082 if (ar_pci->num_msi_intrs > 0)
2083 pci_disable_msi(ar_pci->pdev);
2084 }
2085
2086 static int ath10k_pci_reset_target(struct ath10k *ar)
2087 {
2088 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2089 int wait_limit = 300; /* 3 sec */
2090
2091 /* Wait for Target to finish initialization before we proceed. */
2092 iowrite32(PCIE_SOC_WAKE_V_MASK,
2093 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2094 PCIE_SOC_WAKE_ADDRESS);
2095
2096 ath10k_pci_wait(ar);
2097
2098 while (wait_limit-- &&
2099 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2100 FW_IND_INITIALIZED)) {
2101 if (ar_pci->num_msi_intrs == 0)
2102 /* Fix potential race by repeating CORE_BASE writes */
2103 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2104 PCIE_INTR_CE_MASK_ALL,
2105 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2106 PCIE_INTR_ENABLE_ADDRESS));
2107 mdelay(10);
2108 }
2109
2110 if (wait_limit < 0) {
2111 ath10k_err("Target stalled\n");
2112 iowrite32(PCIE_SOC_WAKE_RESET,
2113 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2114 PCIE_SOC_WAKE_ADDRESS);
2115 return -EIO;
2116 }
2117
2118 iowrite32(PCIE_SOC_WAKE_RESET,
2119 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2120 PCIE_SOC_WAKE_ADDRESS);
2121
2122 return 0;
2123 }
2124
2125 static void ath10k_pci_device_reset(struct ath10k *ar)
2126 {
2127 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2128 void __iomem *mem = ar_pci->mem;
2129 int i;
2130 u32 val;
2131
2132 if (!SOC_GLOBAL_RESET_ADDRESS)
2133 return;
2134
2135 if (!mem)
2136 return;
2137
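/* Wake the target before touching the SoC reset registers. */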
2138 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2139 PCIE_SOC_WAKE_V_MASK);
2140 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2141 if (ath10k_pci_target_is_awake(ar))
2142 break;
2143 msleep(1);
2144 }
2145
2146 /* Put Target, including PCIe, into RESET. */
2147 val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2148 val |= 1;
2149 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2150
2151 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2152 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2153 RTC_STATE_COLD_RESET_MASK)
2154 break;
2155 msleep(1);
2156 }
2157
2158 /* Pull Target, including PCIe, out of RESET. */
2159 val &= ~1;
2160 ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2161
2162 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2163 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2164 RTC_STATE_COLD_RESET_MASK))
2165 break;
2166 msleep(1);
2167 }
2168
2169 ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2170 }
2171
2172 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2173 {
2174 int i;
2175
2176 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2177 if (!test_bit(i, ar_pci->features))
2178 continue;
2179
2180 switch (i) {
2181 case ATH10K_PCI_FEATURE_MSI_X:
2182 ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2183 break;
2184 case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
2185 ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2186 break;
2187 }
2188 }
2189 }
2190
2191 static int ath10k_pci_probe(struct pci_dev *pdev,
2192 const struct pci_device_id *pci_dev)
2193 {
2194 void __iomem *mem;
2195 int ret = 0;
2196 struct ath10k *ar;
2197 struct ath10k_pci *ar_pci;
2198 u32 lcr_val;
2199
2200 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2201
2202 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2203 if (ar_pci == NULL)
2204 return -ENOMEM;
2205
2206 ar_pci->pdev = pdev;
2207 ar_pci->dev = &pdev->dev;
2208
2209 switch (pci_dev->device) {
2210 case QCA988X_1_0_DEVICE_ID:
2211 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
2212 break;
2213 case QCA988X_2_0_DEVICE_ID:
2214 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2215 break;
2216 default:
2217 ret = -ENODEV;
2218 ath10k_err("Unkown device ID: %d\n", pci_dev->device);
2219 goto err_ar_pci;
2220 }
2221
2222 ath10k_pci_dump_features(ar_pci);
2223
2224 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2225 if (!ar) {
2226 ath10k_err("ath10k_core_create failed!\n");
2227 ret = -EINVAL;
2228 goto err_ar_pci;
2229 }
2230
2231 /* Enable QCA988X_1.0 HW workarounds */
2232 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
2233 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2234
2235 ar_pci->ar = ar;
2236 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2237 atomic_set(&ar_pci->keep_awake_count, 0);
2238
2239 pci_set_drvdata(pdev, ar);
2240
2241 /*
2242 * Without any knowledge of the Host, the Target may have been reset or
2243 * power cycled and its Config Space may no longer reflect the PCI
2244 * address space that was assigned earlier by the PCI infrastructure.
2245 * Refresh it now.
2246 */
2247 ret = pci_assign_resource(pdev, BAR_NUM);
2248 if (ret) {
2249 ath10k_err("cannot assign PCI space: %d\n", ret);
2250 goto err_ar;
2251 }
2252
2253 ret = pci_enable_device(pdev);
2254 if (ret) {
2255 ath10k_err("cannot enable PCI device: %d\n", ret);
2256 goto err_ar;
2257 }
2258
2259 /* Request MMIO resources */
2260 ret = pci_request_region(pdev, BAR_NUM, "ath");
2261 if (ret) {
2262 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2263 goto err_device;
2264 }
2265
2266 /*
2267 * Target structures are limited to 32-bit DMA pointers.
2268 * DMA pointers can be wider than 32 bits by default on some systems.
2269 */
2270 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2271 if (ret) {
2272 ath10k_err("32-bit DMA not available: %d\n", ret);
2273 goto err_region;
2274 }
2275
2276 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2277 if (ret) {
2278 ath10k_err("cannot enable 32-bit consistent DMA\n");
2279 goto err_region;
2280 }
2281
2282 /* Set bus master bit in PCI_COMMAND to enable DMA */
2283 pci_set_master(pdev);
2284
2285 /*
2286 * Temporary FIX: disable ASPM
2287 * Will be removed after the OTP is programmed
2288 */
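/* Note: offset 0x80 is assumed to be this device's PCIe Link Control
 * register; clearing its low byte disables ASPM link states. */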
2289 pci_read_config_dword(pdev, 0x80, &lcr_val);
2290 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2291
2292 /* Arrange for access to Target SoC registers. */
2293 mem = pci_iomap(pdev, BAR_NUM, 0);
2294 if (!mem) {
2295 ath10k_err("PCI iomap error\n");
2296 ret = -EIO;
2297 goto err_master;
2298 }
2299
2300 ar_pci->mem = mem;
2301
2302 spin_lock_init(&ar_pci->ce_lock);
2303
2304 ar_pci->cacheline_sz = dma_get_cache_alignment();
2305
2306 ret = ath10k_pci_start_intr(ar);
2307 if (ret) {
2308 ath10k_err("could not start interrupt handling (%d)\n", ret);
2309 goto err_iomap;
2310 }
2311
2312 ret = ath10k_pci_hif_power_up(ar);
2313 if (ret) {
2314 ath10k_err("could not start pci hif (%d)\n", ret);
2315 goto err_intr;
2316 }
2317
2318 ret = ath10k_core_register(ar);
2319 if (ret) {
2320 ath10k_err("could not register driver core (%d)\n", ret);
2321 goto err_hif;
2322 }
2323
2324 return 0;
2325
2326 err_hif:
2327 ath10k_pci_hif_power_down(ar);
2328 err_intr:
2329 ath10k_pci_stop_intr(ar);
2330 err_iomap:
2331 pci_iounmap(pdev, mem);
2332 err_master:
2333 pci_clear_master(pdev);
2334 err_region:
2335 pci_release_region(pdev, BAR_NUM);
2336 err_device:
2337 pci_disable_device(pdev);
2338 err_ar:
2339 pci_set_drvdata(pdev, NULL);
2340 ath10k_core_destroy(ar);
2341 err_ar_pci:
2342 /* call HIF PCI free here */
2343 kfree(ar_pci);
2344
2345 return ret;
2346 }
2347
2348 static void ath10k_pci_remove(struct pci_dev *pdev)
2349 {
2350 struct ath10k *ar = pci_get_drvdata(pdev);
2351 struct ath10k_pci *ar_pci;
2352
2353 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2354
2355 if (!ar)
2356 return;
2357
2358 ar_pci = ath10k_pci_priv(ar);
2359
2360 if (!ar_pci)
2361 return;
2362
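/* Unwind probe: unregister the core, power down the HIF, stop
 * interrupt handling and release the PCI resources. */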
2363 tasklet_kill(&ar_pci->msi_fw_err);
2364
2365 ath10k_core_unregister(ar);
2366 ath10k_pci_hif_power_down(ar);
2367 ath10k_pci_stop_intr(ar);
2368
2369 pci_set_drvdata(pdev, NULL);
2370 pci_iounmap(pdev, ar_pci->mem);
2371 pci_release_region(pdev, BAR_NUM);
2372 pci_clear_master(pdev);
2373 pci_disable_device(pdev);
2374
2375 ath10k_core_destroy(ar);
2376 kfree(ar_pci);
2377 }
2378
2379 #if defined(CONFIG_PM_SLEEP)
2380
2381 #define ATH10K_PCI_PM_CONTROL 0x44
2382
2383 static int ath10k_pci_suspend(struct device *device)
2384 {
2385 struct pci_dev *pdev = to_pci_dev(device);
2386 struct ath10k *ar = pci_get_drvdata(pdev);
2387 struct ath10k_pci *ar_pci;
2388 u32 val;
2389 int ret, retval;
2390
2391 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2392
2393 if (!ar)
2394 return -ENODEV;
2395
2396 ar_pci = ath10k_pci_priv(ar);
2397 if (!ar_pci)
2398 return -ENODEV;
2399
2400 if (ath10k_core_target_suspend(ar))
2401 return -EBUSY;
2402
2403 ret = wait_event_interruptible_timeout(ar->event_queue,
2404 ar->is_target_paused == true,
2405 1 * HZ);
2406 if (ret < 0) {
2407 ath10k_warn("suspend interrupted (%d)\n", ret);
2408 retval = ret;
2409 goto resume;
2410 } else if (ret == 0) {
2411 ath10k_warn("suspend timed out - target pause event never came\n");
2412 retval = -EIO;
2413 goto resume;
2414 }
2415
2416 /*
2417 * Reset is_target_paused so the next suspend checks it again.
2418 * Otherwise it would stay true, the wait above would be skipped,
2419 * and the target would assert because the host is already
2420 * suspended.
2421 */
2422 ar->is_target_paused = false;
2423
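/* Move the device to D3hot via the PCI PM control/status register
 * unless it is already in that power state. */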
2424 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2425
2426 if ((val & 0x000000ff) != 0x3) {
2427 pci_save_state(pdev);
2428 pci_disable_device(pdev);
2429 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2430 (val & 0xffffff00) | 0x03);
2431 }
2432
2433 return 0;
2434 resume:
2435 ret = ath10k_core_target_resume(ar);
2436 if (ret)
2437 ath10k_warn("could not resume (%d)\n", ret);
2438
2439 return retval;
2440 }
2441
2442 static int ath10k_pci_resume(struct device *device)
2443 {
2444 struct pci_dev *pdev = to_pci_dev(device);
2445 struct ath10k *ar = pci_get_drvdata(pdev);
2446 struct ath10k_pci *ar_pci;
2447 int ret;
2448 u32 val;
2449
2450 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2451
2452 if (!ar)
2453 return -ENODEV;
2454 ar_pci = ath10k_pci_priv(ar);
2455
2456 if (!ar_pci)
2457 return -ENODEV;
2458
2459 ret = pci_enable_device(pdev);
2460 if (ret) {
2461 ath10k_warn("cannot enable PCI device: %d\n", ret);
2462 return ret;
2463 }
2464
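/* If the device was left in a low-power state across suspend, bring
 * it back to D0 and restore the saved config space. */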
2465 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2466
2467 if ((val & 0x000000ff) != 0) {
2468 pci_restore_state(pdev);
2469 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2470 val & 0xffffff00);
2471 /*
2472 * Suspend/Resume resets the PCI configuration space,
2473 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2474 * to keep PCI Tx retries from interfering with C3 CPU state
2475 */
2476 pci_read_config_dword(pdev, 0x40, &val);
2477
2478 if ((val & 0x0000ff00) != 0)
2479 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2480 }
2481
2482 ret = ath10k_core_target_resume(ar);
2483 if (ret)
2484 ath10k_warn("target resume failed: %d\n", ret);
2485
2486 return ret;
2487 }
2488
2489 static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
2490 ath10k_pci_suspend,
2491 ath10k_pci_resume);
2492
2493 #define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
2494
2495 #else
2496
2497 #define ATH10K_PCI_PM_OPS NULL
2498
2499 #endif /* CONFIG_PM_SLEEP */
2500
2501 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2502
2503 static struct pci_driver ath10k_pci_driver = {
2504 .name = "ath10k_pci",
2505 .id_table = ath10k_pci_id_table,
2506 .probe = ath10k_pci_probe,
2507 .remove = ath10k_pci_remove,
2508 .driver.pm = ATH10K_PCI_PM_OPS,
2509 };
2510
2511 static int __init ath10k_pci_init(void)
2512 {
2513 int ret;
2514
2515 ret = pci_register_driver(&ath10k_pci_driver);
2516 if (ret)
2517 ath10k_err("pci_register_driver failed [%d]\n", ret);
2518
2519 return ret;
2520 }
2521 module_init(ath10k_pci_init);
2522
2523 static void __exit ath10k_pci_exit(void)
2524 {
2525 pci_unregister_driver(&ath10k_pci_driver);
2526 }
2527
2528 module_exit(ath10k_pci_exit);
2529
2530 MODULE_AUTHOR("Qualcomm Atheros");
2531 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2532 MODULE_LICENSE("Dual BSD/GPL");
2533 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2534 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2535 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2536 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2537 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2538 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);