ath10k: re-add support for early fw indication
drivers/net/wireless/ath/ath10k/pci.c
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
 22#include <linux/bitops.h>
23
24#include "core.h"
25#include "debug.h"
26
27#include "targaddrs.h"
28#include "bmi.h"
29
30#include "hif.h"
31#include "htc.h"
32
33#include "ce.h"
34#include "pci.h"
35
 36static unsigned int ath10k_target_ps;
37module_param(ath10k_target_ps, uint, 0644);
38MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
39
40#define QCA988X_2_0_DEVICE_ID (0x003c)
41
42static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
44 {0}
45};
46
47static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
48 u32 *data);
49
50static void ath10k_pci_process_ce(struct ath10k *ar);
51static int ath10k_pci_post_rx(struct ath10k *ar);
 52static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 53 int num);
 54static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 55static void ath10k_pci_stop_ce(struct ath10k *ar);
 56static int ath10k_pci_device_reset(struct ath10k *ar);
 57static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
58static int ath10k_pci_init_irq(struct ath10k *ar);
59static int ath10k_pci_deinit_irq(struct ath10k *ar);
60static int ath10k_pci_request_irq(struct ath10k *ar);
61static void ath10k_pci_free_irq(struct ath10k *ar);
62static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
63 struct ath10k_ce_pipe *rx_pipe,
64 struct bmi_xfer *xfer);
 65static void ath10k_pci_cleanup_ce(struct ath10k *ar);
66
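/* Host-side Copy Engine ring attributes, one entry per pipe. These must
 * stay consistent with the target-side target_ce_config_wlan table and
 * with the service-to-pipe map further below. */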
67static const struct ce_attr host_ce_config_wlan[] = {
68 /* CE0: host->target HTC control and raw streams */
69 {
70 .flags = CE_ATTR_FLAGS,
71 .src_nentries = 16,
72 .src_sz_max = 256,
73 .dest_nentries = 0,
74 },
75
76 /* CE1: target->host HTT + HTC control */
77 {
78 .flags = CE_ATTR_FLAGS,
79 .src_nentries = 0,
80 .src_sz_max = 512,
81 .dest_nentries = 512,
82 },
83
84 /* CE2: target->host WMI */
85 {
86 .flags = CE_ATTR_FLAGS,
87 .src_nentries = 0,
88 .src_sz_max = 2048,
89 .dest_nentries = 32,
90 },
91
92 /* CE3: host->target WMI */
93 {
94 .flags = CE_ATTR_FLAGS,
95 .src_nentries = 32,
96 .src_sz_max = 2048,
97 .dest_nentries = 0,
98 },
99
100 /* CE4: host->target HTT */
101 {
102 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
103 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
104 .src_sz_max = 256,
105 .dest_nentries = 0,
106 },
107
108 /* CE5: unused */
109 {
110 .flags = CE_ATTR_FLAGS,
111 .src_nentries = 0,
112 .src_sz_max = 0,
113 .dest_nentries = 0,
114 },
115
116 /* CE6: target autonomous hif_memcpy */
117 {
118 .flags = CE_ATTR_FLAGS,
119 .src_nentries = 0,
120 .src_sz_max = 0,
121 .dest_nentries = 0,
122 },
123
124 /* CE7: ce_diag, the Diagnostic Window */
125 {
126 .flags = CE_ATTR_FLAGS,
127 .src_nentries = 2,
128 .src_sz_max = DIAG_TRANSFER_LIMIT,
129 .dest_nentries = 2,
130 },
131};
132
133/* Target firmware's Copy Engine configuration. */
134static const struct ce_pipe_config target_ce_config_wlan[] = {
135 /* CE0: host->target HTC control and raw streams */
136 {
137 .pipenum = 0,
138 .pipedir = PIPEDIR_OUT,
139 .nentries = 32,
140 .nbytes_max = 256,
141 .flags = CE_ATTR_FLAGS,
142 .reserved = 0,
143 },
144
145 /* CE1: target->host HTT + HTC control */
146 {
147 .pipenum = 1,
148 .pipedir = PIPEDIR_IN,
149 .nentries = 32,
150 .nbytes_max = 512,
151 .flags = CE_ATTR_FLAGS,
152 .reserved = 0,
153 },
154
155 /* CE2: target->host WMI */
156 {
157 .pipenum = 2,
158 .pipedir = PIPEDIR_IN,
159 .nentries = 32,
160 .nbytes_max = 2048,
161 .flags = CE_ATTR_FLAGS,
162 .reserved = 0,
163 },
164
165 /* CE3: host->target WMI */
166 {
167 .pipenum = 3,
168 .pipedir = PIPEDIR_OUT,
169 .nentries = 32,
170 .nbytes_max = 2048,
171 .flags = CE_ATTR_FLAGS,
172 .reserved = 0,
173 },
174
175 /* CE4: host->target HTT */
176 {
177 .pipenum = 4,
178 .pipedir = PIPEDIR_OUT,
179 .nentries = 256,
180 .nbytes_max = 256,
181 .flags = CE_ATTR_FLAGS,
182 .reserved = 0,
183 },
184
 185 /* NB: 50% of src nentries, since tx has 2 frags */
186
187 /* CE5: unused */
188 {
189 .pipenum = 5,
190 .pipedir = PIPEDIR_OUT,
191 .nentries = 32,
192 .nbytes_max = 2048,
193 .flags = CE_ATTR_FLAGS,
194 .reserved = 0,
195 },
196
197 /* CE6: Reserved for target autonomous hif_memcpy */
198 {
199 .pipenum = 6,
200 .pipedir = PIPEDIR_INOUT,
201 .nentries = 32,
202 .nbytes_max = 4096,
203 .flags = CE_ATTR_FLAGS,
204 .reserved = 0,
205 },
206
207 /* CE7 used only by Host */
208};
209
210static bool ath10k_pci_irq_pending(struct ath10k *ar)
211{
212 u32 cause;
213
214 /* Check if the shared legacy irq is for us */
215 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
216 PCIE_INTR_CAUSE_ADDRESS);
217 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
218 return true;
219
220 return false;
221}
222
223static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
224{
225 /* IMPORTANT: INTR_CLR register has to be set after
226 * INTR_ENABLE is set to 0, otherwise interrupt can not be
227 * really cleared. */
228 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
229 0);
230 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
231 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
232
233 /* IMPORTANT: this extra read transaction is required to
234 * flush the posted write buffer. */
235 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
236 PCIE_INTR_ENABLE_ADDRESS);
237}
238
239static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
240{
241 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
242 PCIE_INTR_ENABLE_ADDRESS,
243 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
244
245 /* IMPORTANT: this extra read transaction is required to
246 * flush the posted write buffer. */
247 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
248 PCIE_INTR_ENABLE_ADDRESS);
249}
250
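/*
 * "Early" firmware indication handling: until ath10k_pci_hif_start()
 * installs the regular per-CE interrupt handlers, a shared early irq stays
 * registered so that firmware error/indication interrupts raised during
 * init are not lost; they are deferred to the early_irq_tasklet.
 */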
251static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
252{
253 struct ath10k *ar = arg;
254 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
255
256 if (ar_pci->num_msi_intrs == 0) {
257 if (!ath10k_pci_irq_pending(ar))
258 return IRQ_NONE;
259
260 ath10k_pci_disable_and_clear_legacy_irq(ar);
261 }
262
263 tasklet_schedule(&ar_pci->early_irq_tasklet);
264
265 return IRQ_HANDLED;
266}
267
268static int ath10k_pci_request_early_irq(struct ath10k *ar)
269{
270 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
271 int ret;
272
273 /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
274 * interrupt from irq vector is triggered in all cases for FW
275 * indication/errors */
276 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
277 IRQF_SHARED, "ath10k_pci (early)", ar);
278 if (ret) {
279 ath10k_warn("failed to request early irq: %d\n", ret);
280 return ret;
281 }
282
283 return 0;
284}
285
286static void ath10k_pci_free_early_irq(struct ath10k *ar)
287{
288 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
289}
290
291/*
292 * Diagnostic read/write access is provided for startup/config/debug usage.
293 * Caller must guarantee proper alignment, when applicable, and single user
294 * at any moment.
295 */
296static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
297 int nbytes)
298{
299 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
300 int ret = 0;
301 u32 buf;
302 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
303 unsigned int id;
304 unsigned int flags;
 305 struct ath10k_ce_pipe *ce_diag;
306 /* Host buffer address in CE space */
307 u32 ce_data;
308 dma_addr_t ce_data_base = 0;
309 void *data_buf = NULL;
310 int i;
311
312 /*
313 * This code cannot handle reads to non-memory space. Redirect to the
314 * register read fn but preserve the multi word read capability of
315 * this fn
316 */
317 if (address < DRAM_BASE_ADDRESS) {
318 if (!IS_ALIGNED(address, 4) ||
319 !IS_ALIGNED((unsigned long)data, 4))
320 return -EIO;
321
322 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
323 ar, address, (u32 *)data)) == 0)) {
324 nbytes -= sizeof(u32);
325 address += sizeof(u32);
326 data += sizeof(u32);
327 }
328 return ret;
329 }
330
331 ce_diag = ar_pci->ce_diag;
332
333 /*
334 * Allocate a temporary bounce buffer to hold caller's data
335 * to be DMA'ed from Target. This guarantees
336 * 1) 4-byte alignment
337 * 2) Buffer in DMA-able space
338 */
339 orig_nbytes = nbytes;
340 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
341 orig_nbytes,
342 &ce_data_base);
343
344 if (!data_buf) {
345 ret = -ENOMEM;
346 goto done;
347 }
348 memset(data_buf, 0, orig_nbytes);
349
350 remaining_bytes = orig_nbytes;
351 ce_data = ce_data_base;
352 while (remaining_bytes) {
353 nbytes = min_t(unsigned int, remaining_bytes,
354 DIAG_TRANSFER_LIMIT);
355
356 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
357 if (ret != 0)
358 goto done;
359
360 /* Request CE to send from Target(!) address to Host buffer */
361 /*
362 * The address supplied by the caller is in the
363 * Target CPU virtual address space.
364 *
365 * In order to use this address with the diagnostic CE,
366 * convert it from Target CPU virtual address space
367 * to CE address space
368 */
369 ath10k_pci_wake(ar);
370 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
371 address);
372 ath10k_pci_sleep(ar);
373
374 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
375 0);
376 if (ret)
377 goto done;
378
379 i = 0;
380 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
381 &completed_nbytes,
382 &id) != 0) {
383 mdelay(1);
384 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
385 ret = -EBUSY;
386 goto done;
387 }
388 }
389
390 if (nbytes != completed_nbytes) {
391 ret = -EIO;
392 goto done;
393 }
394
395 if (buf != (u32) address) {
396 ret = -EIO;
397 goto done;
398 }
399
400 i = 0;
401 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
402 &completed_nbytes,
403 &id, &flags) != 0) {
404 mdelay(1);
405
406 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
407 ret = -EBUSY;
408 goto done;
409 }
410 }
411
412 if (nbytes != completed_nbytes) {
413 ret = -EIO;
414 goto done;
415 }
416
417 if (buf != ce_data) {
418 ret = -EIO;
419 goto done;
420 }
421
422 remaining_bytes -= nbytes;
423 address += nbytes;
424 ce_data += nbytes;
425 }
426
427done:
428 if (ret == 0) {
429 /* Copy data from allocated DMA buf to caller's buf */
430 WARN_ON_ONCE(orig_nbytes & 3);
431 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
432 ((u32 *)data)[i] =
433 __le32_to_cpu(((__le32 *)data_buf)[i]);
434 }
435 } else
436 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
437 __func__, address);
438
439 if (data_buf)
440 pci_free_consistent(ar_pci->pdev, orig_nbytes,
441 data_buf, ce_data_base);
442
443 return ret;
444}
445
446/* Read 4-byte aligned data from Target memory or register */
447static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
448 u32 *data)
449{
450 /* Assume range doesn't cross this boundary */
451 if (address >= DRAM_BASE_ADDRESS)
452 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
453
454 ath10k_pci_wake(ar);
455 *data = ath10k_pci_read32(ar, address);
456 ath10k_pci_sleep(ar);
457 return 0;
458}
459
460static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
461 const void *data, int nbytes)
462{
463 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
464 int ret = 0;
465 u32 buf;
466 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
467 unsigned int id;
468 unsigned int flags;
 469 struct ath10k_ce_pipe *ce_diag;
470 void *data_buf = NULL;
471 u32 ce_data; /* Host buffer address in CE space */
472 dma_addr_t ce_data_base = 0;
473 int i;
474
475 ce_diag = ar_pci->ce_diag;
476
477 /*
478 * Allocate a temporary bounce buffer to hold caller's data
479 * to be DMA'ed to Target. This guarantees
480 * 1) 4-byte alignment
481 * 2) Buffer in DMA-able space
482 */
483 orig_nbytes = nbytes;
484 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
485 orig_nbytes,
486 &ce_data_base);
487 if (!data_buf) {
488 ret = -ENOMEM;
489 goto done;
490 }
491
492 /* Copy caller's data to allocated DMA buf */
493 WARN_ON_ONCE(orig_nbytes & 3);
494 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
495 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
496
497 /*
498 * The address supplied by the caller is in the
499 * Target CPU virtual address space.
500 *
501 * In order to use this address with the diagnostic CE,
502 * convert it from
503 * Target CPU virtual address space
504 * to
505 * CE address space
506 */
507 ath10k_pci_wake(ar);
508 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
509 ath10k_pci_sleep(ar);
510
511 remaining_bytes = orig_nbytes;
512 ce_data = ce_data_base;
513 while (remaining_bytes) {
514 /* FIXME: check cast */
515 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
516
517 /* Set up to receive directly into Target(!) address */
518 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
519 if (ret != 0)
520 goto done;
521
522 /*
523 * Request CE to send caller-supplied data that
524 * was copied to bounce buffer to Target(!) address.
525 */
526 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
527 nbytes, 0, 0);
528 if (ret != 0)
529 goto done;
530
531 i = 0;
532 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
533 &completed_nbytes,
534 &id) != 0) {
535 mdelay(1);
536
537 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
538 ret = -EBUSY;
539 goto done;
540 }
541 }
542
543 if (nbytes != completed_nbytes) {
544 ret = -EIO;
545 goto done;
546 }
547
548 if (buf != ce_data) {
549 ret = -EIO;
550 goto done;
551 }
552
553 i = 0;
554 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
555 &completed_nbytes,
556 &id, &flags) != 0) {
557 mdelay(1);
558
559 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
560 ret = -EBUSY;
561 goto done;
562 }
563 }
564
565 if (nbytes != completed_nbytes) {
566 ret = -EIO;
567 goto done;
568 }
569
570 if (buf != address) {
571 ret = -EIO;
572 goto done;
573 }
574
575 remaining_bytes -= nbytes;
576 address += nbytes;
577 ce_data += nbytes;
578 }
579
580done:
581 if (data_buf) {
582 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
583 ce_data_base);
584 }
585
586 if (ret != 0)
587 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
588 address);
589
590 return ret;
591}
592
593/* Write 4B data to Target memory or register */
594static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
595 u32 data)
596{
597 /* Assume range doesn't cross this boundary */
598 if (address >= DRAM_BASE_ADDRESS)
599 return ath10k_pci_diag_write_mem(ar, address, &data,
600 sizeof(u32));
601
602 ath10k_pci_wake(ar);
603 ath10k_pci_write32(ar, address, data);
604 ath10k_pci_sleep(ar);
605 return 0;
606}
607
608static bool ath10k_pci_target_is_awake(struct ath10k *ar)
609{
610 void __iomem *mem = ath10k_pci_priv(ar)->mem;
611 u32 val;
612 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
613 RTC_STATE_ADDRESS);
614 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
615}
616
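/*
 * Force the target SoC awake for register access. Wake requests are
 * reference counted via keep_awake_count; the SoC may sleep again only
 * after the last user has called ath10k_do_pci_sleep().
 */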
 617int ath10k_do_pci_wake(struct ath10k *ar)
618{
619 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
620 void __iomem *pci_addr = ar_pci->mem;
621 int tot_delay = 0;
622 int curr_delay = 5;
623
624 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
625 /* Force AWAKE */
626 iowrite32(PCIE_SOC_WAKE_V_MASK,
627 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
628 PCIE_SOC_WAKE_ADDRESS);
629 }
630 atomic_inc(&ar_pci->keep_awake_count);
631
632 if (ar_pci->verified_awake)
 633 return 0;
634
635 for (;;) {
636 if (ath10k_pci_target_is_awake(ar)) {
637 ar_pci->verified_awake = true;
 638 return 0;
639 }
640
641 if (tot_delay > PCIE_WAKE_TIMEOUT) {
 642 ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
 643 PCIE_WAKE_TIMEOUT,
 644 atomic_read(&ar_pci->keep_awake_count));
 645 return -ETIMEDOUT;
646 }
647
648 udelay(curr_delay);
649 tot_delay += curr_delay;
650
651 if (curr_delay < 50)
652 curr_delay += 5;
653 }
654}
655
656void ath10k_do_pci_sleep(struct ath10k *ar)
657{
658 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
659 void __iomem *pci_addr = ar_pci->mem;
660
661 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
662 /* Allow sleep */
663 ar_pci->verified_awake = false;
664 iowrite32(PCIE_SOC_WAKE_RESET,
665 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
666 PCIE_SOC_WAKE_ADDRESS);
667 }
668}
669
670/*
671 * FIXME: Handle OOM properly.
672 */
673static inline
 674struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
675{
676 struct ath10k_pci_compl *compl = NULL;
677
678 spin_lock_bh(&pipe_info->pipe_lock);
679 if (list_empty(&pipe_info->compl_free)) {
680 ath10k_warn("Completion buffers are full\n");
681 goto exit;
682 }
683 compl = list_first_entry(&pipe_info->compl_free,
684 struct ath10k_pci_compl, list);
685 list_del(&compl->list);
686exit:
687 spin_unlock_bh(&pipe_info->pipe_lock);
688 return compl;
689}
690
691/* Called by lower (CE) layer when a send to Target completes. */
 692static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
693{
694 struct ath10k *ar = ce_state->ar;
695 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 696 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
 697 struct ath10k_pci_compl *compl;
698 void *transfer_context;
699 u32 ce_data;
700 unsigned int nbytes;
701 unsigned int transfer_id;
 702
703 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
704 &ce_data, &nbytes,
705 &transfer_id) == 0) {
706 compl = get_free_compl(pipe_info);
707 if (!compl)
708 break;
709
 710 compl->state = ATH10K_PCI_COMPL_SEND;
711 compl->ce_state = ce_state;
712 compl->pipe_info = pipe_info;
 713 compl->skb = transfer_context;
714 compl->nbytes = nbytes;
715 compl->transfer_id = transfer_id;
716 compl->flags = 0;
717
718 /*
719 * Add the completion to the processing queue.
720 */
721 spin_lock_bh(&ar_pci->compl_lock);
722 list_add_tail(&compl->list, &ar_pci->compl_process);
723 spin_unlock_bh(&ar_pci->compl_lock);
 724 }
725
726 ath10k_pci_process_ce(ar);
727}
728
729/* Called by lower (CE) layer when data is received from the Target. */
 730static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
731{
732 struct ath10k *ar = ce_state->ar;
733 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 734 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
735 struct ath10k_pci_compl *compl;
736 struct sk_buff *skb;
737 void *transfer_context;
738 u32 ce_data;
739 unsigned int nbytes;
740 unsigned int transfer_id;
741 unsigned int flags;
 742
743 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
744 &ce_data, &nbytes, &transfer_id,
745 &flags) == 0) {
746 compl = get_free_compl(pipe_info);
747 if (!compl)
748 break;
749
 750 compl->state = ATH10K_PCI_COMPL_RECV;
751 compl->ce_state = ce_state;
752 compl->pipe_info = pipe_info;
 753 compl->skb = transfer_context;
754 compl->nbytes = nbytes;
755 compl->transfer_id = transfer_id;
756 compl->flags = flags;
757
758 skb = transfer_context;
759 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
760 skb->len + skb_tailroom(skb),
761 DMA_FROM_DEVICE);
762 /*
763 * Add the completion to the processing queue.
764 */
765 spin_lock_bh(&ar_pci->compl_lock);
766 list_add_tail(&compl->list, &ar_pci->compl_process);
767 spin_unlock_bh(&ar_pci->compl_lock);
 768 }
769
770 ath10k_pci_process_ce(ar);
771}
772
773/* Send the first nbytes bytes of the buffer */
774static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
775 unsigned int transfer_id,
776 unsigned int bytes, struct sk_buff *nbuf)
777{
778 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
779 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 780 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
 781 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
782 unsigned int len;
783 u32 flags = 0;
784 int ret;
785
786 len = min(bytes, nbuf->len);
787 bytes -= len;
788
789 if (len & 3)
790 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
791
792 ath10k_dbg(ATH10K_DBG_PCI,
793 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
794 nbuf->data, (unsigned long long) skb_cb->paddr,
795 nbuf->len, len);
796 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
797 "ath10k tx: data: ",
798 nbuf->data, nbuf->len);
799
800 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
801 flags);
 802 if (ret)
 803 ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
804
805 return ret;
806}
807
808static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
809{
810 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 811 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
812}
813
814static void ath10k_pci_hif_dump_area(struct ath10k *ar)
815{
816 u32 reg_dump_area = 0;
817 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
818 u32 host_addr;
819 int ret;
820 u32 i;
821
822 ath10k_err("firmware crashed!\n");
823 ath10k_err("hardware name %s version 0x%x\n",
824 ar->hw_params.name, ar->target_version);
825 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
826 ar->fw_version_minor, ar->fw_version_release,
827 ar->fw_version_build);
828
829 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
830 ret = ath10k_pci_diag_read_mem(ar, host_addr,
831 &reg_dump_area, sizeof(u32));
832 if (ret) {
833 ath10k_err("failed to read FW dump area address: %d\n", ret);
834 return;
835 }
836
837 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
838
839 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
840 &reg_dump_values[0],
841 REG_DUMP_COUNT_QCA988X * sizeof(u32));
842 if (ret != 0) {
 843 ath10k_err("failed to read FW dump area: %d\n", ret);
844 return;
845 }
846
847 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
848
849 ath10k_err("target Register Dump\n");
850 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
851 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
852 i,
853 reg_dump_values[i],
854 reg_dump_values[i + 1],
855 reg_dump_values[i + 2],
856 reg_dump_values[i + 3]);
 857
 858 queue_work(ar->workqueue, &ar->restart_work);
859}
860
861static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
862 int force)
863{
864 if (!force) {
865 int resources;
866 /*
867 * Decide whether to actually poll for completions, or just
868 * wait for a later chance.
869 * If there seem to be plenty of resources left, then just wait
870 * since checking involves reading a CE register, which is a
871 * relatively expensive operation.
872 */
873 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
874
875 /*
876 * If at least 50% of the total resources are still available,
877 * don't bother checking again yet.
878 */
879 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
880 return;
881 }
882 ath10k_ce_per_engine_service(ar, pipe);
883}
884
885static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
886 struct ath10k_hif_cb *callbacks)
887{
888 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
889
890 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
891
892 memcpy(&ar_pci->msg_callbacks_current, callbacks,
893 sizeof(ar_pci->msg_callbacks_current));
894}
895
 896static int ath10k_pci_alloc_compl(struct ath10k *ar)
897{
898 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 899 const struct ce_attr *attr;
 900 struct ath10k_pci_pipe *pipe_info;
 901 struct ath10k_pci_compl *compl;
 902 int i, pipe_num, completions;
903
904 spin_lock_init(&ar_pci->compl_lock);
905 INIT_LIST_HEAD(&ar_pci->compl_process);
906
 907 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
908 pipe_info = &ar_pci->pipe_info[pipe_num];
909
910 spin_lock_init(&pipe_info->pipe_lock);
911 INIT_LIST_HEAD(&pipe_info->compl_free);
912
913 /* Handle Diagnostic CE specially */
 914 if (pipe_info->ce_hdl == ar_pci->ce_diag)
915 continue;
916
917 attr = &host_ce_config_wlan[pipe_num];
918 completions = 0;
919
 920 if (attr->src_nentries)
 921 completions += attr->src_nentries;
 922
 923 if (attr->dest_nentries)
 924 completions += attr->dest_nentries;
925
926 for (i = 0; i < completions; i++) {
 927 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
928 if (!compl) {
929 ath10k_warn("No memory for completion state\n");
 930 ath10k_pci_cleanup_ce(ar);
931 return -ENOMEM;
932 }
933
 934 compl->state = ATH10K_PCI_COMPL_FREE;
935 list_add_tail(&compl->list, &pipe_info->compl_free);
936 }
937 }
938
939 return 0;
940}
941
942static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
943{
944 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
945 const struct ce_attr *attr;
946 struct ath10k_pci_pipe *pipe_info;
947 int pipe_num, disable_interrupts;
948
949 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
950 pipe_info = &ar_pci->pipe_info[pipe_num];
951
952 /* Handle Diagnostic CE specially */
953 if (pipe_info->ce_hdl == ar_pci->ce_diag)
954 continue;
955
956 attr = &host_ce_config_wlan[pipe_num];
957
958 if (attr->src_nentries) {
959 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
960 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
961 ath10k_pci_ce_send_done,
962 disable_interrupts);
963 }
964
965 if (attr->dest_nentries)
966 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
967 ath10k_pci_ce_recv_data);
968 }
969
970 return 0;
971}
972
 973static void ath10k_pci_kill_tasklet(struct ath10k *ar)
974{
975 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
976 int i;
977
 978 tasklet_kill(&ar_pci->intr_tq);
 979 tasklet_kill(&ar_pci->msi_fw_err);
 980 tasklet_kill(&ar_pci->early_irq_tasklet);
981
982 for (i = 0; i < CE_COUNT; i++)
983 tasklet_kill(&ar_pci->pipe_info[i].intr);
984}
985
986static void ath10k_pci_stop_ce(struct ath10k *ar)
987{
988 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
989 struct ath10k_pci_compl *compl;
990 struct sk_buff *skb;
991
992 /* Mark pending completions as aborted, so that upper layers free up
993 * their associated resources */
994 spin_lock_bh(&ar_pci->compl_lock);
995 list_for_each_entry(compl, &ar_pci->compl_process, list) {
 996 skb = compl->skb;
997 ATH10K_SKB_CB(skb)->is_aborted = true;
998 }
999 spin_unlock_bh(&ar_pci->compl_lock);
1000}
1001
1002static void ath10k_pci_cleanup_ce(struct ath10k *ar)
1003{
1004 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1005 struct ath10k_pci_compl *compl, *tmp;
 1006 struct ath10k_pci_pipe *pipe_info;
1007 struct sk_buff *netbuf;
1008 int pipe_num;
1009
1010 /* Free pending completions. */
1011 spin_lock_bh(&ar_pci->compl_lock);
1012 if (!list_empty(&ar_pci->compl_process))
1013 ath10k_warn("pending completions still present! possible memory leaks.\n");
1014
1015 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
1016 list_del(&compl->list);
 1017 netbuf = compl->skb;
1018 dev_kfree_skb_any(netbuf);
1019 kfree(compl);
1020 }
1021 spin_unlock_bh(&ar_pci->compl_lock);
1022
1023 /* Free unused completions for each pipe. */
 1024 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1025 pipe_info = &ar_pci->pipe_info[pipe_num];
1026
1027 spin_lock_bh(&pipe_info->pipe_lock);
1028 list_for_each_entry_safe(compl, tmp,
1029 &pipe_info->compl_free, list) {
1030 list_del(&compl->list);
1031 kfree(compl);
1032 }
1033 spin_unlock_bh(&pipe_info->pipe_lock);
1034 }
1035}
1036
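/*
 * Drain the shared completion queue: dispatch send/recv completions to the
 * registered HIF callbacks, re-post an RX buffer for every received frame
 * and return the completion structs to their pipe's free list. Processing
 * is serialized with the compl_processing flag.
 */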
1037static void ath10k_pci_process_ce(struct ath10k *ar)
1038{
1039 struct ath10k_pci *ar_pci = ar->hif.priv;
1040 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1041 struct ath10k_pci_compl *compl;
1042 struct sk_buff *skb;
1043 unsigned int nbytes;
1044 int ret, send_done = 0;
1045
1046 /* Upper layers aren't ready to handle tx/rx completions in parallel so
1047 * we must serialize all completion processing. */
1048
1049 spin_lock_bh(&ar_pci->compl_lock);
1050 if (ar_pci->compl_processing) {
1051 spin_unlock_bh(&ar_pci->compl_lock);
1052 return;
1053 }
1054 ar_pci->compl_processing = true;
1055 spin_unlock_bh(&ar_pci->compl_lock);
1056
1057 for (;;) {
1058 spin_lock_bh(&ar_pci->compl_lock);
1059 if (list_empty(&ar_pci->compl_process)) {
1060 spin_unlock_bh(&ar_pci->compl_lock);
1061 break;
1062 }
1063 compl = list_first_entry(&ar_pci->compl_process,
1064 struct ath10k_pci_compl, list);
1065 list_del(&compl->list);
1066 spin_unlock_bh(&ar_pci->compl_lock);
1067
1068 switch (compl->state) {
1069 case ATH10K_PCI_COMPL_SEND:
 1070 cb->tx_completion(ar,
 1071 compl->skb,
1072 compl->transfer_id);
1073 send_done = 1;
1074 break;
1075 case ATH10K_PCI_COMPL_RECV:
1076 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1077 if (ret) {
1078 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1079 compl->pipe_info->pipe_num, ret);
1080 break;
1081 }
1082
 1083 skb = compl->skb;
1084 nbytes = compl->nbytes;
1085
1086 ath10k_dbg(ATH10K_DBG_PCI,
1087 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
1088 skb, nbytes);
1089 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1090 "ath10k rx: ", skb->data, nbytes);
1091
1092 if (skb->len + skb_tailroom(skb) >= nbytes) {
1093 skb_trim(skb, 0);
1094 skb_put(skb, nbytes);
1095 cb->rx_completion(ar, skb,
1096 compl->pipe_info->pipe_num);
1097 } else {
1098 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
1099 nbytes,
1100 skb->len + skb_tailroom(skb));
1101 }
1102 break;
1103 case ATH10K_PCI_COMPL_FREE:
1104 ath10k_warn("free completion cannot be processed\n");
1105 break;
1106 default:
1107 ath10k_warn("invalid completion state (%d)\n",
1108 compl->state);
1109 break;
1110 }
1111
 1112 compl->state = ATH10K_PCI_COMPL_FREE;
1113
1114 /*
1115 * Add completion back to the pipe's free list.
1116 */
1117 spin_lock_bh(&compl->pipe_info->pipe_lock);
1118 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1119 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1120 }
1121
1122 spin_lock_bh(&ar_pci->compl_lock);
1123 ar_pci->compl_processing = false;
1124 spin_unlock_bh(&ar_pci->compl_lock);
1125}
1126
1127/* TODO - temporary mapping while we have too few CE's */
1128static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1129 u16 service_id, u8 *ul_pipe,
1130 u8 *dl_pipe, int *ul_is_polled,
1131 int *dl_is_polled)
1132{
1133 int ret = 0;
1134
1135 /* polling for received messages not supported */
1136 *dl_is_polled = 0;
1137
1138 switch (service_id) {
1139 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1140 /*
1141 * Host->target HTT gets its own pipe, so it can be polled
1142 * while other pipes are interrupt driven.
1143 */
1144 *ul_pipe = 4;
1145 /*
1146 * Use the same target->host pipe for HTC ctrl, HTC raw
1147 * streams, and HTT.
1148 */
1149 *dl_pipe = 1;
1150 break;
1151
1152 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1153 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
1154 /*
1155 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
1156 * HTC_CTRL_RSVD_SVC could share the same pipe as the
1157 * WMI services. So, if another CE is needed, change
1158 * this to *ul_pipe = 3, which frees up CE 0.
1159 */
1160 /* *ul_pipe = 3; */
1161 *ul_pipe = 0;
1162 *dl_pipe = 1;
1163 break;
1164
1165 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1166 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1167 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1168 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1169
1170 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1171 *ul_pipe = 3;
1172 *dl_pipe = 2;
1173 break;
1174
1175 /* pipe 5 unused */
1176 /* pipe 6 reserved */
1177 /* pipe 7 reserved */
1178
1179 default:
1180 ret = -1;
1181 break;
1182 }
1183 *ul_is_polled =
1184 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1185
1186 return ret;
1187}
1188
1189static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1190 u8 *ul_pipe, u8 *dl_pipe)
1191{
1192 int ul_is_polled, dl_is_polled;
1193
1194 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1195 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1196 ul_pipe,
1197 dl_pipe,
1198 &ul_is_polled,
1199 &dl_is_polled);
1200}
1201
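/*
 * Replenish a pipe's RX ring with up to @num freshly allocated and
 * DMA-mapped skbs; on failure the pipe is cleaned up again.
 */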
 1202static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1203 int num)
1204{
1205 struct ath10k *ar = pipe_info->hif_ce_state;
1206 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1207 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1208 struct sk_buff *skb;
1209 dma_addr_t ce_data;
1210 int i, ret = 0;
1211
1212 if (pipe_info->buf_sz == 0)
1213 return 0;
1214
1215 for (i = 0; i < num; i++) {
1216 skb = dev_alloc_skb(pipe_info->buf_sz);
1217 if (!skb) {
 1218 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1219 num);
1220 ret = -ENOMEM;
1221 goto err;
1222 }
1223
1224 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1225
1226 ce_data = dma_map_single(ar->dev, skb->data,
1227 skb->len + skb_tailroom(skb),
1228 DMA_FROM_DEVICE);
1229
1230 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
 1231 ath10k_warn("failed to DMA map sk_buff\n");
1232 dev_kfree_skb_any(skb);
1233 ret = -EIO;
1234 goto err;
1235 }
1236
1237 ATH10K_SKB_CB(skb)->paddr = ce_data;
1238
1239 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1240 pipe_info->buf_sz,
1241 PCI_DMA_FROMDEVICE);
1242
1243 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1244 ce_data);
1245 if (ret) {
 1246 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1247 num, ret);
1248 goto err;
1249 }
1250 }
1251
1252 return ret;
1253
1254err:
1255 ath10k_pci_rx_pipe_cleanup(pipe_info);
1256 return ret;
1257}
1258
1259static int ath10k_pci_post_rx(struct ath10k *ar)
1260{
1261 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1262 struct ath10k_pci_pipe *pipe_info;
1263 const struct ce_attr *attr;
1264 int pipe_num, ret = 0;
1265
 1266 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1267 pipe_info = &ar_pci->pipe_info[pipe_num];
1268 attr = &host_ce_config_wlan[pipe_num];
1269
1270 if (attr->dest_nentries == 0)
1271 continue;
1272
1273 ret = ath10k_pci_post_rx_pipe(pipe_info,
1274 attr->dest_nentries - 1);
1275 if (ret) {
1276 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1277 pipe_num, ret);
1278
1279 for (; pipe_num >= 0; pipe_num--) {
1280 pipe_info = &ar_pci->pipe_info[pipe_num];
1281 ath10k_pci_rx_pipe_cleanup(pipe_info);
1282 }
1283 return ret;
1284 }
1285 }
1286
1287 return 0;
1288}
1289
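/*
 * Bring the HIF up: switch from the early irq to the regular per-CE
 * interrupts, allocate completion structures, hook up the CE send/recv
 * callbacks and post the initial set of RX buffers.
 */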
1290static int ath10k_pci_hif_start(struct ath10k *ar)
1291{
1292 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1293 int ret, ret_early;
1294
1295 ath10k_pci_free_early_irq(ar);
1296 ath10k_pci_kill_tasklet(ar);
 1297
 1298 ret = ath10k_pci_alloc_compl(ar);
 1299 if (ret) {
 1300 ath10k_warn("failed to allocate CE completions: %d\n", ret);
 1301 goto err_early_irq;
1302 }
1303
1304 ret = ath10k_pci_request_irq(ar);
1305 if (ret) {
 1306 ath10k_warn("failed to request irqs: %d\n",
1307 ret);
1308 goto err_free_compl;
1309 }
1310
1311 ret = ath10k_pci_setup_ce_irq(ar);
1312 if (ret) {
1313 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
 1314 goto err_stop;
1315 }
1316
1317 /* Post buffers once to start things off. */
1318 ret = ath10k_pci_post_rx(ar);
1319 if (ret) {
1320 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1321 ret);
 1322 goto err_stop;
1323 }
1324
1325 ar_pci->started = 1;
1326 return 0;
 1327
1328err_stop:
1329 ath10k_ce_disable_interrupts(ar);
1330 ath10k_pci_free_irq(ar);
1331 ath10k_pci_kill_tasklet(ar);
1332 ath10k_pci_stop_ce(ar);
1333 ath10k_pci_process_ce(ar);
1334err_free_compl:
1335 ath10k_pci_cleanup_ce(ar);
1336err_early_irq:
1337 /* Though there should be no interrupts (device was reset)
1338 * power_down() expects the early IRQ to be installed as per the
1339 * driver lifecycle. */
1340 ret_early = ath10k_pci_request_early_irq(ar);
1341 if (ret_early)
1342 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1343
 1344 return ret;
1345}
1346
 1347static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1348{
1349 struct ath10k *ar;
1350 struct ath10k_pci *ar_pci;
 1351 struct ath10k_ce_pipe *ce_hdl;
1352 u32 buf_sz;
1353 struct sk_buff *netbuf;
1354 u32 ce_data;
1355
1356 buf_sz = pipe_info->buf_sz;
1357
1358 /* Unused Copy Engine */
1359 if (buf_sz == 0)
1360 return;
1361
1362 ar = pipe_info->hif_ce_state;
1363 ar_pci = ath10k_pci_priv(ar);
1364
1365 if (!ar_pci->started)
1366 return;
1367
1368 ce_hdl = pipe_info->ce_hdl;
1369
1370 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1371 &ce_data) == 0) {
1372 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1373 netbuf->len + skb_tailroom(netbuf),
1374 DMA_FROM_DEVICE);
1375 dev_kfree_skb_any(netbuf);
1376 }
1377}
1378
 1379static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1380{
1381 struct ath10k *ar;
1382 struct ath10k_pci *ar_pci;
 1383 struct ath10k_ce_pipe *ce_hdl;
1384 struct sk_buff *netbuf;
1385 u32 ce_data;
1386 unsigned int nbytes;
1387 unsigned int id;
1388 u32 buf_sz;
1389
1390 buf_sz = pipe_info->buf_sz;
1391
1392 /* Unused Copy Engine */
1393 if (buf_sz == 0)
1394 return;
1395
1396 ar = pipe_info->hif_ce_state;
1397 ar_pci = ath10k_pci_priv(ar);
1398
1399 if (!ar_pci->started)
1400 return;
1401
1402 ce_hdl = pipe_info->ce_hdl;
1403
1404 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1405 &ce_data, &nbytes, &id) == 0) {
1406 /*
 1407 * Indicate the completion to the higher layer to free
1408 * the buffer
1409 */
1410
1411 if (!netbuf) {
1412 ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1413 ce_hdl->id);
1414 continue;
1415 }
1416
1417 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1418 ar_pci->msg_callbacks_current.tx_completion(ar,
1419 netbuf,
1420 id);
1421 }
1422}
1423
1424/*
1425 * Cleanup residual buffers for device shutdown:
1426 * buffers that were enqueued for receive
1427 * buffers that were to be sent
1428 * Note: Buffers that had completed but which were
1429 * not yet processed are on a completion queue. They
1430 * are handled when the completion thread shuts down.
1431 */
1432static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1433{
1434 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1435 int pipe_num;
1436
 1437 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 1438 struct ath10k_pci_pipe *pipe_info;
1439
1440 pipe_info = &ar_pci->pipe_info[pipe_num];
1441 ath10k_pci_rx_pipe_cleanup(pipe_info);
1442 ath10k_pci_tx_pipe_cleanup(pipe_info);
1443 }
1444}
1445
1446static void ath10k_pci_ce_deinit(struct ath10k *ar)
1447{
1448 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1449 struct ath10k_pci_pipe *pipe_info;
1450 int pipe_num;
1451
 1452 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1453 pipe_info = &ar_pci->pipe_info[pipe_num];
1454 if (pipe_info->ce_hdl) {
1455 ath10k_ce_deinit(pipe_info->ce_hdl);
1456 pipe_info->ce_hdl = NULL;
1457 pipe_info->buf_sz = 0;
1458 }
1459 }
1460}
1461
1462static void ath10k_pci_hif_stop(struct ath10k *ar)
1463{
 1464 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1465 int ret;
 1466
1467 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1468
1469 ret = ath10k_ce_disable_interrupts(ar);
1470 if (ret)
1471 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
 1472
1473 ath10k_pci_free_irq(ar);
1474 ath10k_pci_kill_tasklet(ar);
1475 ath10k_pci_stop_ce(ar);
1476
1477 ret = ath10k_pci_request_early_irq(ar);
1478 if (ret)
1479 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1480
1481 /* At this point, asynchronous threads are stopped, the target should
1482 * not DMA nor interrupt. We process the leftovers and then free
1483 * everything else up. */
1484
1485 ath10k_pci_process_ce(ar);
1486 ath10k_pci_cleanup_ce(ar);
1487 ath10k_pci_buffer_cleanup(ar);
 1488
1489 /* Make the sure the device won't access any structures on the host by
1490 * resetting it. The device was fed with PCI CE ringbuffer
1491 * configuration during init. If ringbuffers are freed and the device
1492 * were to access them this could lead to memory corruption on the
1493 * host. */
1494 ath10k_pci_device_reset(ar);
1495
 1496 ar_pci->started = 0;
1497}
1498
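/*
 * Exchange one BMI request/response pair with the target over the BMI CE
 * pair. The caller's buffers are bounced through DMA-mapped copies and
 * completion is polled via ath10k_pci_bmi_wait().
 */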
1499static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1500 void *req, u32 req_len,
1501 void *resp, u32 *resp_len)
1502{
1503 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1504 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1505 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1506 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1507 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1508 dma_addr_t req_paddr = 0;
1509 dma_addr_t resp_paddr = 0;
1510 struct bmi_xfer xfer = {};
1511 void *treq, *tresp = NULL;
1512 int ret = 0;
1513
1514 might_sleep();
1515
1516 if (resp && !resp_len)
1517 return -EINVAL;
1518
1519 if (resp && resp_len && *resp_len == 0)
1520 return -EINVAL;
1521
1522 treq = kmemdup(req, req_len, GFP_KERNEL);
1523 if (!treq)
1524 return -ENOMEM;
1525
1526 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1527 ret = dma_mapping_error(ar->dev, req_paddr);
1528 if (ret)
1529 goto err_dma;
1530
1531 if (resp && resp_len) {
1532 tresp = kzalloc(*resp_len, GFP_KERNEL);
1533 if (!tresp) {
1534 ret = -ENOMEM;
1535 goto err_req;
1536 }
1537
1538 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1539 DMA_FROM_DEVICE);
1540 ret = dma_mapping_error(ar->dev, resp_paddr);
1541 if (ret)
1542 goto err_req;
1543
1544 xfer.wait_for_resp = true;
1545 xfer.resp_len = 0;
1546
1547 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1548 }
1549
1550 init_completion(&xfer.done);
1551
1552 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1553 if (ret)
1554 goto err_resp;
1555
1556 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1557 if (ret) {
1558 u32 unused_buffer;
1559 unsigned int unused_nbytes;
1560 unsigned int unused_id;
1561
1562 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1563 &unused_nbytes, &unused_id);
1564 } else {
1565 /* non-zero means we did not time out */
1566 ret = 0;
1567 }
1568
1569err_resp:
1570 if (resp) {
1571 u32 unused_buffer;
1572
1573 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1574 dma_unmap_single(ar->dev, resp_paddr,
1575 *resp_len, DMA_FROM_DEVICE);
1576 }
1577err_req:
1578 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1579
1580 if (ret == 0 && resp_len) {
1581 *resp_len = min(*resp_len, xfer.resp_len);
1582 memcpy(resp, tresp, xfer.resp_len);
1583 }
1584err_dma:
1585 kfree(treq);
1586 kfree(tresp);
1587
1588 return ret;
1589}
1590
 1591static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 1592{
1593 struct bmi_xfer *xfer;
1594 u32 ce_data;
1595 unsigned int nbytes;
1596 unsigned int transfer_id;
1597
1598 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1599 &nbytes, &transfer_id))
1600 return;
1601
1602 if (xfer->wait_for_resp)
1603 return;
1604
1605 complete(&xfer->done);
1606}
1607
 1608static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 1609{
1610 struct bmi_xfer *xfer;
1611 u32 ce_data;
1612 unsigned int nbytes;
1613 unsigned int transfer_id;
1614 unsigned int flags;
1615
1616 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1617 &nbytes, &transfer_id, &flags))
1618 return;
1619
1620 if (!xfer->wait_for_resp) {
1621 ath10k_warn("unexpected: BMI data received; ignoring\n");
1622 return;
1623 }
1624
1625 xfer->resp_len = nbytes;
1626 complete(&xfer->done);
1627}
1628
1629static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1630 struct ath10k_ce_pipe *rx_pipe,
1631 struct bmi_xfer *xfer)
1632{
1633 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1634
1635 while (time_before_eq(jiffies, timeout)) {
1636 ath10k_pci_bmi_send_done(tx_pipe);
1637 ath10k_pci_bmi_recv_data(rx_pipe);
1638
1639 if (completion_done(&xfer->done))
1640 return 0;
1641
1642 schedule();
1643 }
1644
1645 return -ETIMEDOUT;
1646}
1647
1648/*
1649 * Map from service/endpoint to Copy Engine.
1650 * This table is derived from the CE_PCI TABLE, above.
1651 * It is passed to the Target at startup for use by firmware.
1652 */
1653static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1654 {
1655 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1656 PIPEDIR_OUT, /* out = UL = host -> target */
1657 3,
1658 },
1659 {
1660 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1661 PIPEDIR_IN, /* in = DL = target -> host */
1662 2,
1663 },
1664 {
1665 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1666 PIPEDIR_OUT, /* out = UL = host -> target */
1667 3,
1668 },
1669 {
1670 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1671 PIPEDIR_IN, /* in = DL = target -> host */
1672 2,
1673 },
1674 {
1675 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1676 PIPEDIR_OUT, /* out = UL = host -> target */
1677 3,
1678 },
1679 {
1680 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1681 PIPEDIR_IN, /* in = DL = target -> host */
1682 2,
1683 },
1684 {
1685 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1686 PIPEDIR_OUT, /* out = UL = host -> target */
1687 3,
1688 },
1689 {
1690 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1691 PIPEDIR_IN, /* in = DL = target -> host */
1692 2,
1693 },
1694 {
1695 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1696 PIPEDIR_OUT, /* out = UL = host -> target */
1697 3,
1698 },
1699 {
1700 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1701 PIPEDIR_IN, /* in = DL = target -> host */
1702 2,
1703 },
1704 {
1705 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1706 PIPEDIR_OUT, /* out = UL = host -> target */
1707 0, /* could be moved to 3 (share with WMI) */
1708 },
1709 {
1710 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1711 PIPEDIR_IN, /* in = DL = target -> host */
1712 1,
1713 },
1714 {
1715 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1716 PIPEDIR_OUT, /* out = UL = host -> target */
1717 0,
1718 },
1719 {
1720 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1721 PIPEDIR_IN, /* in = DL = target -> host */
1722 1,
1723 },
1724 {
1725 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1726 PIPEDIR_OUT, /* out = UL = host -> target */
1727 4,
1728 },
1729 {
1730 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1731 PIPEDIR_IN, /* in = DL = target -> host */
1732 1,
1733 },
1734
1735 /* (Additions here) */
1736
1737 { /* Must be last */
1738 0,
1739 0,
1740 0,
1741 },
1742};
1743
1744/*
1745 * Send an interrupt to the device to wake up the Target CPU
1746 * so it has an opportunity to notice any changed state.
1747 */
1748static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1749{
1750 int ret;
1751 u32 core_ctrl;
1752
1753 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1754 CORE_CTRL_ADDRESS,
1755 &core_ctrl);
1756 if (ret) {
 1757 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1758 return ret;
1759 }
1760
1761 /* A_INUM_FIRMWARE interrupt to Target CPU */
1762 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1763
1764 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1765 CORE_CTRL_ADDRESS,
1766 core_ctrl);
1767 if (ret) {
1768 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1769 ret);
1770 return ret;
1771 }
 1772
 1773 return 0;
1774}
1775
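/*
 * Download the target-side CE pipe configuration and the service-to-pipe
 * map through the diagnostic window, disable PCIe L1, set up early IRAM
 * allocation and finally set HI_OPTION_EARLY_CFG_DONE so the target
 * proceeds with initialization.
 */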
1776static int ath10k_pci_init_config(struct ath10k *ar)
1777{
1778 u32 interconnect_targ_addr;
1779 u32 pcie_state_targ_addr = 0;
1780 u32 pipe_cfg_targ_addr = 0;
1781 u32 svc_to_pipe_map = 0;
1782 u32 pcie_config_flags = 0;
1783 u32 ealloc_value;
1784 u32 ealloc_targ_addr;
1785 u32 flag2_value;
1786 u32 flag2_targ_addr;
1787 int ret = 0;
1788
1789 /* Download to Target the CE Config and the service-to-CE map */
1790 interconnect_targ_addr =
1791 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1792
1793 /* Supply Target-side CE configuration */
1794 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1795 &pcie_state_targ_addr);
1796 if (ret != 0) {
1797 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1798 return ret;
1799 }
1800
1801 if (pcie_state_targ_addr == 0) {
1802 ret = -EIO;
1803 ath10k_err("Invalid pcie state addr\n");
1804 return ret;
1805 }
1806
1807 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1808 offsetof(struct pcie_state,
1809 pipe_cfg_addr),
1810 &pipe_cfg_targ_addr);
1811 if (ret != 0) {
1812 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1813 return ret;
1814 }
1815
1816 if (pipe_cfg_targ_addr == 0) {
1817 ret = -EIO;
1818 ath10k_err("Invalid pipe cfg addr\n");
1819 return ret;
1820 }
1821
1822 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1823 target_ce_config_wlan,
1824 sizeof(target_ce_config_wlan));
1825
1826 if (ret != 0) {
1827 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1828 return ret;
1829 }
1830
1831 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1832 offsetof(struct pcie_state,
1833 svc_to_pipe_map),
1834 &svc_to_pipe_map);
1835 if (ret != 0) {
1836 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1837 return ret;
1838 }
1839
1840 if (svc_to_pipe_map == 0) {
1841 ret = -EIO;
1842 ath10k_err("Invalid svc_to_pipe map\n");
1843 return ret;
1844 }
1845
1846 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1847 target_service_to_ce_map_wlan,
1848 sizeof(target_service_to_ce_map_wlan));
1849 if (ret != 0) {
1850 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1851 return ret;
1852 }
1853
1854 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1855 offsetof(struct pcie_state,
1856 config_flags),
1857 &pcie_config_flags);
1858 if (ret != 0) {
1859 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1860 return ret;
1861 }
1862
1863 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1864
1865 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1866 offsetof(struct pcie_state, config_flags),
1867 &pcie_config_flags,
1868 sizeof(pcie_config_flags));
1869 if (ret != 0) {
1870 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1871 return ret;
1872 }
1873
1874 /* configure early allocation */
1875 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1876
1877 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1878 if (ret != 0) {
 1879 ath10k_err("Failed to get early alloc val: %d\n", ret);
1880 return ret;
1881 }
1882
1883 /* first bank is switched to IRAM */
1884 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1885 HI_EARLY_ALLOC_MAGIC_MASK);
1886 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1887 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1888
1889 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1890 if (ret != 0) {
1891 ath10k_err("Failed to set early alloc val: %d\n", ret);
1892 return ret;
1893 }
1894
1895 /* Tell Target to proceed with initialization */
1896 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1897
1898 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1899 if (ret != 0) {
1900 ath10k_err("Failed to get option val: %d\n", ret);
1901 return ret;
1902 }
1903
1904 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1905
1906 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1907 if (ret != 0) {
1908 ath10k_err("Failed to set option val: %d\n", ret);
1909 return ret;
1910 }
1911
1912 return 0;
1913}
1914
1915
1916
1917static int ath10k_pci_ce_init(struct ath10k *ar)
1918{
1919 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1920 struct ath10k_pci_pipe *pipe_info;
1921 const struct ce_attr *attr;
1922 int pipe_num;
1923
 1924 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1925 pipe_info = &ar_pci->pipe_info[pipe_num];
1926 pipe_info->pipe_num = pipe_num;
1927 pipe_info->hif_ce_state = ar;
1928 attr = &host_ce_config_wlan[pipe_num];
1929
1930 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1931 if (pipe_info->ce_hdl == NULL) {
 1932 ath10k_err("failed to initialize CE for pipe: %d\n",
1933 pipe_num);
1934
1935 /* It is safe to call it here. It checks if ce_hdl is
1936 * valid for each pipe */
1937 ath10k_pci_ce_deinit(ar);
1938 return -1;
1939 }
1940
 1941 if (pipe_num == CE_COUNT - 1) {
1942 /*
1943 * Reserve the ultimate CE for
1944 * diagnostic Window support
1945 */
 1946 ar_pci->ce_diag = pipe_info->ce_hdl;
1947 continue;
1948 }
1949
1950 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1951 }
1952
1953 return 0;
1954}
1955
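/*
 * Service the firmware indicator register: acknowledge FW_IND_EVENT_PENDING
 * and dump the firmware crash area if the interface is already started,
 * otherwise only warn about the early indication.
 */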
1956static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1957{
1958 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1959 u32 fw_indicator_address, fw_indicator;
1960
1961 ath10k_pci_wake(ar);
1962
1963 fw_indicator_address = ar_pci->fw_indicator_address;
1964 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1965
1966 if (fw_indicator & FW_IND_EVENT_PENDING) {
1967 /* ACK: clear Target-side pending event */
1968 ath10k_pci_write32(ar, fw_indicator_address,
1969 fw_indicator & ~FW_IND_EVENT_PENDING);
1970
1971 if (ar_pci->started) {
1972 ath10k_pci_hif_dump_area(ar);
1973 } else {
1974 /*
1975 * Probable Target failure before we're prepared
1976 * to handle it. Generally unexpected.
1977 */
1978 ath10k_warn("early firmware event indicated\n");
1979 }
1980 }
1981
1982 ath10k_pci_sleep(ar);
1983}
1984
1985static int ath10k_pci_hif_power_up(struct ath10k *ar)
1986{
 1987 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 1988 const char *irq_mode;
1989 int ret;
1990
1991 /*
1992 * Bring the target up cleanly.
1993 *
1994 * The target may be in an undefined state with an AUX-powered Target
1995 * and a Host in WoW mode. If the Host crashes, loses power, or is
1996 * restarted (without unloading the driver) then the Target is left
1997 * (aux) powered and running. On a subsequent driver load, the Target
1998 * is in an unexpected state. We try to catch that here in order to
1999 * reset the Target and retry the probe.
2000 */
2001 ret = ath10k_pci_device_reset(ar);
2002 if (ret) {
2003 ath10k_err("failed to reset target: %d\n", ret);
 2004 goto err;
 2005 }
 2006
 2007 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
 2008 /* Force AWAKE forever */
 2009 ath10k_do_pci_wake(ar);
2010
2011 ret = ath10k_pci_ce_init(ar);
1d2b48d6
MK
2012 if (ret) {
2013 ath10k_err("failed to initialize CE: %d\n", ret);
8c5c5368 2014 goto err_ps;
1d2b48d6 2015 }
8c5c5368 2016
98563d5a
MK
2017 ret = ath10k_ce_disable_interrupts(ar);
2018 if (ret) {
2019 ath10k_err("failed to disable CE interrupts: %d\n", ret);
2020 goto err_ce;
2021 }
2022
fc15ca13 2023 ret = ath10k_pci_init_irq(ar);
98563d5a 2024 if (ret) {
fc15ca13 2025 ath10k_err("failed to init irqs: %d\n", ret);
8c5c5368 2026 goto err_ce;
98563d5a
MK
2027 }
2028
ab977bd0
MK
2029 ret = ath10k_pci_request_early_irq(ar);
2030 if (ret) {
2031 ath10k_err("failed to request early irq: %d\n", ret);
2032 goto err_deinit_irq;
2033 }
2034
98563d5a
MK
2035 ret = ath10k_pci_wait_for_target_init(ar);
2036 if (ret) {
2037 ath10k_err("failed to wait for target to init: %d\n", ret);
ab977bd0 2038 goto err_free_early_irq;
98563d5a
MK
2039 }
2040
2041 ret = ath10k_pci_init_config(ar);
2042 if (ret) {
2043 ath10k_err("failed to setup init config: %d\n", ret);
ab977bd0 2044 goto err_free_early_irq;
98563d5a 2045 }
8c5c5368
MK
2046
2047 ret = ath10k_pci_wake_target_cpu(ar);
2048 if (ret) {
1d2b48d6 2049 ath10k_err("could not wake up target CPU: %d\n", ret);
ab977bd0 2050 goto err_free_early_irq;
8c5c5368
MK
2051 }
2052
95cbb6a8
KV
2053 if (ar_pci->num_msi_intrs > 1)
2054 irq_mode = "MSI-X";
2055 else if (ar_pci->num_msi_intrs == 1)
2056 irq_mode = "MSI";
2057 else
2058 irq_mode = "legacy";
2059
650b91fb
KV
2060 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2061 ath10k_info("pci irq %s\n", irq_mode);
95cbb6a8 2062
8c5c5368
MK
2063 return 0;
2064
ab977bd0
MK
2065err_free_early_irq:
2066 ath10k_pci_free_early_irq(ar);
fc15ca13
MK
2067err_deinit_irq:
2068 ath10k_pci_deinit_irq(ar);
8c5c5368
MK
2069err_ce:
2070 ath10k_pci_ce_deinit(ar);
5d1aa946 2071 ath10k_pci_device_reset(ar);
8c5c5368 2072err_ps:
8cc8df90 2073 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
8c5c5368
MK
2074 ath10k_do_pci_sleep(ar);
2075err:
2076 return ret;
2077}
2078
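/* Reverse of ath10k_pci_hif_power_up(): release interrupts, reset the
 * device and tear down the copy engines. */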
2079static void ath10k_pci_hif_power_down(struct ath10k *ar)
2080{
8cc8df90
BM
2081 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2082
ab977bd0
MK
2083 ath10k_pci_free_early_irq(ar);
2084 ath10k_pci_kill_tasklet(ar);
fc15ca13 2085 ath10k_pci_deinit_irq(ar);
6a42a47e 2086 ath10k_pci_device_reset(ar);
8cc8df90 2087
8c5c5368 2088 ath10k_pci_ce_deinit(ar);
8cc8df90 2089 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
8c5c5368
MK
2090 ath10k_do_pci_sleep(ar);
2091}
2092
8cd13cad
MK
2093#ifdef CONFIG_PM
2094
2095#define ATH10K_PCI_PM_CONTROL 0x44
2096
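/* Suspend/resume manipulate the PCI PM control register
 * (ATH10K_PCI_PM_CONTROL) directly: suspend moves the device into the
 * D3hot power state (unless it is already there), resume brings it back
 * to D0 and restores the saved config space. */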
2097static int ath10k_pci_hif_suspend(struct ath10k *ar)
2098{
2099 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2100 struct pci_dev *pdev = ar_pci->pdev;
2101 u32 val;
2102
2103 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2104
2105 if ((val & 0x000000ff) != 0x3) {
2106 pci_save_state(pdev);
2107 pci_disable_device(pdev);
2108 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2109 (val & 0xffffff00) | 0x03);
2110 }
2111
2112 return 0;
2113}
2114
2115static int ath10k_pci_hif_resume(struct ath10k *ar)
2116{
2117 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2118 struct pci_dev *pdev = ar_pci->pdev;
2119 u32 val;
2120
2121 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2122
2123 if ((val & 0x000000ff) != 0) {
2124 pci_restore_state(pdev);
2125 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2126 val & 0xffffff00);
2127 /*
2128 * Suspend/Resume resets the PCI configuration space,
2129 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2130 * to keep PCI Tx retries from interfering with C3 CPU state
2131 */
2132 pci_read_config_dword(pdev, 0x40, &val);
2133
2134 if ((val & 0x0000ff00) != 0)
2135 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2136 }
2137
2138 return 0;
2139}
2140#endif
2141
5e3dd157
KV
2142static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2143 .send_head = ath10k_pci_hif_send_head,
2144 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2145 .start = ath10k_pci_hif_start,
2146 .stop = ath10k_pci_hif_stop,
2147 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2148 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2149 .send_complete_check = ath10k_pci_hif_send_complete_check,
e799bbff 2150 .set_callbacks = ath10k_pci_hif_set_callbacks,
5e3dd157 2151 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
8c5c5368
MK
2152 .power_up = ath10k_pci_hif_power_up,
2153 .power_down = ath10k_pci_hif_power_down,
8cd13cad
MK
2154#ifdef CONFIG_PM
2155 .suspend = ath10k_pci_hif_suspend,
2156 .resume = ath10k_pci_hif_resume,
2157#endif
5e3dd157
KV
2158};
2159
2160static void ath10k_pci_ce_tasklet(unsigned long ptr)
2161{
87263e5b 2162 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
5e3dd157
KV
2163 struct ath10k_pci *ar_pci = pipe->ar_pci;
2164
2165 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2166}
2167
2168static void ath10k_msi_err_tasklet(unsigned long data)
2169{
2170 struct ath10k *ar = (struct ath10k *)data;
2171
2172 ath10k_pci_fw_interrupt_handler(ar);
2173}
2174
2175/*
2176 * Handler for a per-engine interrupt on a PARTICULAR CE.
2177 * This is used in cases where each CE has a private MSI interrupt.
2178 */
2179static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2180{
2181 struct ath10k *ar = arg;
2182 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2183 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2184
e5742672 2185 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
5e3dd157
KV
2186 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2187 return IRQ_HANDLED;
2188 }
2189
2190 /*
2191 * NOTE: We are able to derive ce_id from irq because we
 2192 * use a one-to-one mapping for CEs 0..5.
 2193 * CEs 6 & 7 do not use interrupts at all.
2194 *
2195 * This mapping must be kept in sync with the mapping
2196 * used by firmware.
2197 */
2198 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2199 return IRQ_HANDLED;
2200}
2201
2202static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2203{
2204 struct ath10k *ar = arg;
2205 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2206
2207 tasklet_schedule(&ar_pci->msi_fw_err);
2208 return IRQ_HANDLED;
2209}
2210
2211/*
2212 * Top-level interrupt handler for all PCI interrupts from a Target.
2213 * When a block of MSI interrupts is allocated, this top-level handler
2214 * is not used; instead, we directly call the correct sub-handler.
2215 */
2216static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2217{
2218 struct ath10k *ar = arg;
2219 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2220
2221 if (ar_pci->num_msi_intrs == 0) {
e539887b
MK
2222 if (!ath10k_pci_irq_pending(ar))
2223 return IRQ_NONE;
2224
2685218b 2225 ath10k_pci_disable_and_clear_legacy_irq(ar);
5e3dd157
KV
2226 }
2227
2228 tasklet_schedule(&ar_pci->intr_tq);
2229
2230 return IRQ_HANDLED;
2231}
2232
ab977bd0
MK
2233static void ath10k_pci_early_irq_tasklet(unsigned long data)
2234{
2235 struct ath10k *ar = (struct ath10k *)data;
2236 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2237 u32 fw_ind;
2238 int ret;
2239
2240 ret = ath10k_pci_wake(ar);
2241 if (ret) {
2242 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2243 ret);
2244 return;
2245 }
2246
2247 fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2248 if (fw_ind & FW_IND_EVENT_PENDING) {
2249 ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2250 fw_ind & ~FW_IND_EVENT_PENDING);
2251
2252 /* Some structures are unavailable during early boot or at
 2253 * driver teardown, so just print that the device has crashed. */
2254 ath10k_warn("device crashed - no diagnostics available\n");
2255 }
2256
2257 ath10k_pci_sleep(ar);
2258 ath10k_pci_enable_legacy_irq(ar);
2259}
2260
5e3dd157
KV
2261static void ath10k_pci_tasklet(unsigned long data)
2262{
2263 struct ath10k *ar = (struct ath10k *)data;
2264 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2265
2266 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2267 ath10k_ce_per_engine_service_any(ar);
2268
2685218b
MK
2269 /* Re-enable legacy irq that was disabled in the irq handler */
2270 if (ar_pci->num_msi_intrs == 0)
2271 ath10k_pci_enable_legacy_irq(ar);
5e3dd157
KV
2272}
2273
fc15ca13 2274static int ath10k_pci_request_irq_msix(struct ath10k *ar)
5e3dd157
KV
2275{
2276 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 2277 int ret, i;
5e3dd157
KV
2278
2279 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2280 ath10k_pci_msi_fw_handler,
2281 IRQF_SHARED, "ath10k_pci", ar);
591ecdb8 2282 if (ret) {
fc15ca13 2283 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
591ecdb8 2284 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
5e3dd157 2285 return ret;
591ecdb8 2286 }
5e3dd157
KV
2287
2288 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2289 ret = request_irq(ar_pci->pdev->irq + i,
2290 ath10k_pci_per_engine_handler,
2291 IRQF_SHARED, "ath10k_pci", ar);
2292 if (ret) {
fc15ca13 2293 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
5e3dd157
KV
2294 ar_pci->pdev->irq + i, ret);
2295
87b1423b
MK
2296 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2297 free_irq(ar_pci->pdev->irq + i, ar);
5e3dd157 2298
87b1423b 2299 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
5e3dd157
KV
2300 return ret;
2301 }
2302 }
2303
5e3dd157
KV
2304 return 0;
2305}
2306
fc15ca13 2307static int ath10k_pci_request_irq_msi(struct ath10k *ar)
5e3dd157
KV
2308{
2309 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2310 int ret;
2311
5e3dd157
KV
2312 ret = request_irq(ar_pci->pdev->irq,
2313 ath10k_pci_interrupt_handler,
2314 IRQF_SHARED, "ath10k_pci", ar);
fc15ca13
MK
2315 if (ret) {
2316 ath10k_warn("failed to request MSI irq %d: %d\n",
2317 ar_pci->pdev->irq, ret);
5e3dd157
KV
2318 return ret;
2319 }
2320
5e3dd157
KV
2321 return 0;
2322}
2323
fc15ca13 2324static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
5e3dd157
KV
2325{
2326 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2327 int ret;
2328
2329 ret = request_irq(ar_pci->pdev->irq,
2330 ath10k_pci_interrupt_handler,
2331 IRQF_SHARED, "ath10k_pci", ar);
f3782744 2332 if (ret) {
fc15ca13
MK
2333 ath10k_warn("failed to request legacy irq %d: %d\n",
2334 ar_pci->pdev->irq, ret);
f3782744
KV
2335 return ret;
2336 }
5e3dd157 2337
5e3dd157
KV
2338 return 0;
2339}
2340
fc15ca13
MK
2341static int ath10k_pci_request_irq(struct ath10k *ar)
2342{
2343 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2344
2345 switch (ar_pci->num_msi_intrs) {
2346 case 0:
2347 return ath10k_pci_request_irq_legacy(ar);
2348 case 1:
2349 return ath10k_pci_request_irq_msi(ar);
2350 case MSI_NUM_REQUEST:
2351 return ath10k_pci_request_irq_msix(ar);
2352 }
2353
2354 ath10k_warn("unknown irq configuration upon request\n");
2355 return -EINVAL;
2356}
2357
2358static void ath10k_pci_free_irq(struct ath10k *ar)
2359{
2360 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2361 int i;
2362
 2363 /* There's at least one interrupt regardless of whether it's legacy INTR,
 2364 * MSI or MSI-X */
2365 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2366 free_irq(ar_pci->pdev->irq + i, ar);
2367}
2368
2369static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
5e3dd157
KV
2370{
2371 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157
KV
2372 int i;
2373
fc15ca13 2374 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
5e3dd157 2375 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
fc15ca13 2376 (unsigned long)ar);
ab977bd0
MK
2377 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2378 (unsigned long)ar);
5e3dd157
KV
2379
2380 for (i = 0; i < CE_COUNT; i++) {
2381 ar_pci->pipe_info[i].ar_pci = ar_pci;
fc15ca13 2382 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
5e3dd157
KV
2383 (unsigned long)&ar_pci->pipe_info[i]);
2384 }
fc15ca13
MK
2385}
2386
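/* Set up interrupt delivery, trying MSI-X first (when the hardware
 * advertises it), then a single MSI, and finally legacy interrupts.
 * ar_pci->num_msi_intrs records which mode was selected. */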
2387static int ath10k_pci_init_irq(struct ath10k *ar)
2388{
2389 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2390 int ret;
2391
2392 ath10k_pci_init_irq_tasklets(ar);
5e3dd157
KV
2393
2394 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
fc15ca13 2395 goto msi;
5e3dd157 2396
fc15ca13
MK
2397 /* Try MSI-X */
2398 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2399 ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
2400 if (ret == 0)
2401 return 0;
2402 if (ret > 0)
2403 pci_disable_msi(ar_pci->pdev);
5e3dd157 2404
fc15ca13
MK
2405msi:
2406 /* Try MSI */
2407 ar_pci->num_msi_intrs = 1;
2408 ret = pci_enable_msi(ar_pci->pdev);
2409 if (ret == 0)
2410 return 0;
5e3dd157 2411
fc15ca13
MK
2412 /* Try legacy irq
2413 *
 2414 * A potential race occurs here: the CORE_BASE write
 2415 * depends on the target correctly decoding the AXI address, but
 2416 * the host won't know when the target writes BAR to CORE_CTRL.
 2417 * This write might get lost if the target has NOT written BAR.
 2418 * For now, work around the race by repeating the write in the
 2419 * synchronization check below. */
2420 ar_pci->num_msi_intrs = 0;
5e3dd157 2421
fc15ca13
MK
2422 ret = ath10k_pci_wake(ar);
2423 if (ret) {
2424 ath10k_warn("failed to wake target: %d\n", ret);
2425 return ret;
5e3dd157
KV
2426 }
2427
fc15ca13
MK
2428 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2429 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2430 ath10k_pci_sleep(ar);
2431
2432 return 0;
2433}
2434
2435static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2436{
2437 int ret;
2438
2439 ret = ath10k_pci_wake(ar);
f3782744 2440 if (ret) {
fc15ca13 2441 ath10k_warn("failed to wake target: %d\n", ret);
f3782744
KV
2442 return ret;
2443 }
5e3dd157 2444
fc15ca13
MK
2445 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2446 0);
2447 ath10k_pci_sleep(ar);
2448
2449 return 0;
5e3dd157
KV
2450}
2451
fc15ca13 2452static int ath10k_pci_deinit_irq(struct ath10k *ar)
5e3dd157
KV
2453{
2454 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2455
fc15ca13
MK
2456 switch (ar_pci->num_msi_intrs) {
2457 case 0:
2458 return ath10k_pci_deinit_irq_legacy(ar);
2459 case 1:
2460 /* fall-through */
2461 case MSI_NUM_REQUEST:
5e3dd157 2462 pci_disable_msi(ar_pci->pdev);
fc15ca13
MK
2463 return 0;
2464 }
2465
2466 ath10k_warn("unknown irq configuration upon deinit\n");
2467 return -EINVAL;
5e3dd157
KV
2468}
2469
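/* Poll the firmware indicator register for FW_IND_INITIALIZED for up to
 * roughly 3 seconds. With legacy interrupts the CORE_BASE interrupt
 * enable write is repeated on each iteration to work around the race
 * described in ath10k_pci_init_irq(). */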
d7fb47f5 2470static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
5e3dd157
KV
2471{
2472 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2473 int wait_limit = 300; /* 3 sec */
f3782744 2474 int ret;
5e3dd157 2475
98563d5a 2476 ret = ath10k_pci_wake(ar);
f3782744 2477 if (ret) {
5b2589fc 2478 ath10k_err("failed to wake up target: %d\n", ret);
f3782744
KV
2479 return ret;
2480 }
5e3dd157
KV
2481
2482 while (wait_limit-- &&
2483 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2484 FW_IND_INITIALIZED)) {
2485 if (ar_pci->num_msi_intrs == 0)
2486 /* Fix potential race by repeating CORE_BASE writes */
2487 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2488 PCIE_INTR_CE_MASK_ALL,
2489 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2490 PCIE_INTR_ENABLE_ADDRESS));
2491 mdelay(10);
2492 }
2493
2494 if (wait_limit < 0) {
5b2589fc
MK
2495 ath10k_err("target stalled\n");
2496 ret = -EIO;
2497 goto out;
5e3dd157
KV
2498 }
2499
5b2589fc 2500out:
98563d5a 2501 ath10k_pci_sleep(ar);
5b2589fc 2502 return ret;
5e3dd157
KV
2503}
2504
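/* Cold-reset the target (including PCIe) by toggling the SoC global
 * reset bit and waiting for RTC_STATE to report the cold reset state in
 * each direction. */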
5b2589fc 2505static int ath10k_pci_device_reset(struct ath10k *ar)
5e3dd157 2506{
5b2589fc 2507 int i, ret;
5e3dd157
KV
2508 u32 val;
2509
5b2589fc
MK
2510 ret = ath10k_do_pci_wake(ar);
2511 if (ret) {
2512 ath10k_err("failed to wake up target: %d\n",
2513 ret);
2514 return ret;
5e3dd157
KV
2515 }
2516
2517 /* Put Target, including PCIe, into RESET. */
e479ed43 2518 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 2519 val |= 1;
e479ed43 2520 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2521
2522 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2523 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2524 RTC_STATE_COLD_RESET_MASK)
2525 break;
2526 msleep(1);
2527 }
2528
2529 /* Pull Target, including PCIe, out of RESET. */
2530 val &= ~1;
e479ed43 2531 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2532
2533 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2534 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2535 RTC_STATE_COLD_RESET_MASK))
2536 break;
2537 msleep(1);
2538 }
2539
5b2589fc
MK
2540 ath10k_do_pci_sleep(ar);
2541 return 0;
5e3dd157
KV
2542}
2543
2544static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2545{
2546 int i;
2547
2548 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2549 if (!test_bit(i, ar_pci->features))
2550 continue;
2551
2552 switch (i) {
2553 case ATH10K_PCI_FEATURE_MSI_X:
24cfade1 2554 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
5e3dd157 2555 break;
8cc8df90 2556 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
24cfade1 2557 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
8cc8df90 2558 break;
5e3dd157
KV
2559 }
2560 }
2561}
2562
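/* PCI probe: allocate driver state, map the register BAR (BAR_NUM),
 * restrict DMA to 32 bits, read the chip id and hand the device over to
 * the ath10k core. */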
2563static int ath10k_pci_probe(struct pci_dev *pdev,
2564 const struct pci_device_id *pci_dev)
2565{
2566 void __iomem *mem;
2567 int ret = 0;
2568 struct ath10k *ar;
2569 struct ath10k_pci *ar_pci;
e01ae68c 2570 u32 lcr_val, chip_id;
5e3dd157
KV
2571
2572 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2573
2574 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2575 if (ar_pci == NULL)
2576 return -ENOMEM;
2577
2578 ar_pci->pdev = pdev;
2579 ar_pci->dev = &pdev->dev;
2580
2581 switch (pci_dev->device) {
5e3dd157
KV
2582 case QCA988X_2_0_DEVICE_ID:
2583 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2584 break;
2585 default:
2586 ret = -ENODEV;
2587 ath10k_err("Unkown device ID: %d\n", pci_dev->device);
2588 goto err_ar_pci;
2589 }
2590
8cc8df90
BM
2591 if (ath10k_target_ps)
2592 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2593
5e3dd157
KV
2594 ath10k_pci_dump_features(ar_pci);
2595
3a0861ff 2596 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
5e3dd157 2597 if (!ar) {
1d2b48d6 2598 ath10k_err("failed to create driver core\n");
5e3dd157
KV
2599 ret = -EINVAL;
2600 goto err_ar_pci;
2601 }
2602
5e3dd157
KV
2603 ar_pci->ar = ar;
2604 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2605 atomic_set(&ar_pci->keep_awake_count, 0);
2606
2607 pci_set_drvdata(pdev, ar);
2608
2609 /*
2610 * Without any knowledge of the Host, the Target may have been reset or
2611 * power cycled and its Config Space may no longer reflect the PCI
2612 * address space that was assigned earlier by the PCI infrastructure.
2613 * Refresh it now.
2614 */
2615 ret = pci_assign_resource(pdev, BAR_NUM);
2616 if (ret) {
1d2b48d6 2617 ath10k_err("failed to assign PCI space: %d\n", ret);
5e3dd157
KV
2618 goto err_ar;
2619 }
2620
2621 ret = pci_enable_device(pdev);
2622 if (ret) {
1d2b48d6 2623 ath10k_err("failed to enable PCI device: %d\n", ret);
5e3dd157
KV
2624 goto err_ar;
2625 }
2626
2627 /* Request MMIO resources */
2628 ret = pci_request_region(pdev, BAR_NUM, "ath");
2629 if (ret) {
1d2b48d6 2630 ath10k_err("failed to request MMIO region: %d\n", ret);
5e3dd157
KV
2631 goto err_device;
2632 }
2633
2634 /*
2635 * Target structures have a limit of 32 bit DMA pointers.
2636 * DMA pointers can be wider than 32 bits by default on some systems.
2637 */
2638 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2639 if (ret) {
1d2b48d6 2640 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
5e3dd157
KV
2641 goto err_region;
2642 }
2643
2644 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2645 if (ret) {
1d2b48d6 2646 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
5e3dd157
KV
2647 goto err_region;
2648 }
2649
2650 /* Set bus master bit in PCI_COMMAND to enable DMA */
2651 pci_set_master(pdev);
2652
2653 /*
2654 * Temporary FIX: disable ASPM
2655 * Will be removed after the OTP is programmed
2656 */
2657 pci_read_config_dword(pdev, 0x80, &lcr_val);
2658 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2659
2660 /* Arrange for access to Target SoC registers. */
2661 mem = pci_iomap(pdev, BAR_NUM, 0);
2662 if (!mem) {
1d2b48d6 2663 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
5e3dd157
KV
2664 ret = -EIO;
2665 goto err_master;
2666 }
2667
2668 ar_pci->mem = mem;
2669
2670 spin_lock_init(&ar_pci->ce_lock);
2671
e01ae68c
KV
2672 ret = ath10k_do_pci_wake(ar);
2673 if (ret) {
2674 ath10k_err("Failed to get chip id: %d\n", ret);
12eb0879 2675 goto err_iomap;
e01ae68c
KV
2676 }
2677
233eb97f 2678 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
e01ae68c
KV
2679
2680 ath10k_do_pci_sleep(ar);
2681
24cfade1
KV
2682 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2683
e01ae68c 2684 ret = ath10k_core_register(ar, chip_id);
5e3dd157 2685 if (ret) {
1d2b48d6 2686 ath10k_err("failed to register driver core: %d\n", ret);
32270b61 2687 goto err_iomap;
5e3dd157
KV
2688 }
2689
2690 return 0;
2691
5e3dd157
KV
2692err_iomap:
2693 pci_iounmap(pdev, mem);
2694err_master:
2695 pci_clear_master(pdev);
2696err_region:
2697 pci_release_region(pdev, BAR_NUM);
2698err_device:
2699 pci_disable_device(pdev);
2700err_ar:
5e3dd157
KV
2701 ath10k_core_destroy(ar);
2702err_ar_pci:
2703 /* call HIF PCI free here */
2704 kfree(ar_pci);
2705
2706 return ret;
2707}
2708
2709static void ath10k_pci_remove(struct pci_dev *pdev)
2710{
2711 struct ath10k *ar = pci_get_drvdata(pdev);
2712 struct ath10k_pci *ar_pci;
2713
2714 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2715
2716 if (!ar)
2717 return;
2718
2719 ar_pci = ath10k_pci_priv(ar);
2720
2721 if (!ar_pci)
2722 return;
2723
2724 tasklet_kill(&ar_pci->msi_fw_err);
2725
2726 ath10k_core_unregister(ar);
5e3dd157 2727
5e3dd157
KV
2728 pci_iounmap(pdev, ar_pci->mem);
2729 pci_release_region(pdev, BAR_NUM);
2730 pci_clear_master(pdev);
2731 pci_disable_device(pdev);
2732
2733 ath10k_core_destroy(ar);
2734 kfree(ar_pci);
2735}
2736
5e3dd157
KV
2737MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2738
2739static struct pci_driver ath10k_pci_driver = {
2740 .name = "ath10k_pci",
2741 .id_table = ath10k_pci_id_table,
2742 .probe = ath10k_pci_probe,
2743 .remove = ath10k_pci_remove,
5e3dd157
KV
2744};
2745
2746static int __init ath10k_pci_init(void)
2747{
2748 int ret;
2749
2750 ret = pci_register_driver(&ath10k_pci_driver);
2751 if (ret)
1d2b48d6 2752 ath10k_err("failed to register PCI driver: %d\n", ret);
5e3dd157
KV
2753
2754 return ret;
2755}
2756module_init(ath10k_pci_init);
2757
2758static void __exit ath10k_pci_exit(void)
2759{
2760 pci_unregister_driver(&ath10k_pci_driver);
2761}
2762
2763module_exit(ath10k_pci_exit);
2764
2765MODULE_AUTHOR("Qualcomm Atheros");
2766MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2767MODULE_LICENSE("Dual BSD/GPL");
5e3dd157
KV
2768MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2769MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2770MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);