drivers/net/wireless/ath/ath10k/pci.c
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
650b91fb 22#include <linux/bitops.h>
23
24#include "core.h"
25#include "debug.h"
26
27#include "targaddrs.h"
28#include "bmi.h"
29
30#include "hif.h"
31#include "htc.h"
32
33#include "ce.h"
34#include "pci.h"
35
36enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
40};
41
8cc8df90 42static unsigned int ath10k_target_ps;
43static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
44
45module_param(ath10k_target_ps, uint, 0644);
46MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
47
48module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
49MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
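/* Example usage (illustrative, not from the original source; assumes the
 * PCI support is built as the ath10k_pci module):
 *
 *   modprobe ath10k_pci irq_mode=2            # force MSI interrupts
 *   modprobe ath10k_pci ath10k_target_ps=1    # enable target (SoC) PS
 *
 * Both parameters are also visible under
 * /sys/module/ath10k_pci/parameters/, but they only take effect the next
 * time the driver sets the device up.
 */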
50
51#define QCA988X_2_0_DEVICE_ID (0x003c)
52
53static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
54 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
55 {0}
56};
57
58static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
59 u32 *data);
60
61static void ath10k_pci_process_ce(struct ath10k *ar);
62static int ath10k_pci_post_rx(struct ath10k *ar);
87263e5b 63static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
5e3dd157 64 int num);
87263e5b 65static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
5e3dd157 66static void ath10k_pci_stop_ce(struct ath10k *ar);
5b2589fc 67static int ath10k_pci_device_reset(struct ath10k *ar);
d7fb47f5 68static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
69static int ath10k_pci_init_irq(struct ath10k *ar);
70static int ath10k_pci_deinit_irq(struct ath10k *ar);
71static int ath10k_pci_request_irq(struct ath10k *ar);
72static void ath10k_pci_free_irq(struct ath10k *ar);
73static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
74 struct ath10k_ce_pipe *rx_pipe,
75 struct bmi_xfer *xfer);
c80de12b 76static void ath10k_pci_cleanup_ce(struct ath10k *ar);
5e3dd157
KV
77
78static const struct ce_attr host_ce_config_wlan[] = {
79 /* CE0: host->target HTC control and raw streams */
80 {
81 .flags = CE_ATTR_FLAGS,
82 .src_nentries = 16,
83 .src_sz_max = 256,
84 .dest_nentries = 0,
85 },
86
87 /* CE1: target->host HTT + HTC control */
88 {
89 .flags = CE_ATTR_FLAGS,
90 .src_nentries = 0,
91 .src_sz_max = 512,
92 .dest_nentries = 512,
93 },
94
95 /* CE2: target->host WMI */
96 {
97 .flags = CE_ATTR_FLAGS,
98 .src_nentries = 0,
99 .src_sz_max = 2048,
100 .dest_nentries = 32,
101 },
102
103 /* CE3: host->target WMI */
104 {
105 .flags = CE_ATTR_FLAGS,
106 .src_nentries = 32,
107 .src_sz_max = 2048,
108 .dest_nentries = 0,
109 },
110
111 /* CE4: host->target HTT */
112 {
113 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
114 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
115 .src_sz_max = 256,
116 .dest_nentries = 0,
117 },
118
119 /* CE5: unused */
120 {
121 .flags = CE_ATTR_FLAGS,
122 .src_nentries = 0,
123 .src_sz_max = 0,
124 .dest_nentries = 0,
125 },
126
127 /* CE6: target autonomous hif_memcpy */
128 {
129 .flags = CE_ATTR_FLAGS,
130 .src_nentries = 0,
131 .src_sz_max = 0,
132 .dest_nentries = 0,
133 },
134
135 /* CE7: ce_diag, the Diagnostic Window */
136 {
137 .flags = CE_ATTR_FLAGS,
138 .src_nentries = 2,
139 .src_sz_max = DIAG_TRANSFER_LIMIT,
140 .dest_nentries = 2,
141 },
142};
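/* NB: the attributes above describe the host side of each copy engine.
 * The matching target-side view, target_ce_config_wlan below, is pushed
 * to the firmware in ath10k_pci_init_config(), so the two tables have to
 * stay consistent per pipe number. */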
143
144/* Target firmware's Copy Engine configuration. */
145static const struct ce_pipe_config target_ce_config_wlan[] = {
146 /* CE0: host->target HTC control and raw streams */
147 {
148 .pipenum = 0,
149 .pipedir = PIPEDIR_OUT,
150 .nentries = 32,
151 .nbytes_max = 256,
152 .flags = CE_ATTR_FLAGS,
153 .reserved = 0,
154 },
155
156 /* CE1: target->host HTT + HTC control */
157 {
158 .pipenum = 1,
159 .pipedir = PIPEDIR_IN,
160 .nentries = 32,
161 .nbytes_max = 512,
162 .flags = CE_ATTR_FLAGS,
163 .reserved = 0,
164 },
165
166 /* CE2: target->host WMI */
167 {
168 .pipenum = 2,
169 .pipedir = PIPEDIR_IN,
170 .nentries = 32,
171 .nbytes_max = 2048,
172 .flags = CE_ATTR_FLAGS,
173 .reserved = 0,
174 },
175
176 /* CE3: host->target WMI */
177 {
178 .pipenum = 3,
179 .pipedir = PIPEDIR_OUT,
180 .nentries = 32,
181 .nbytes_max = 2048,
182 .flags = CE_ATTR_FLAGS,
183 .reserved = 0,
184 },
185
186 /* CE4: host->target HTT */
187 {
188 .pipenum = 4,
189 .pipedir = PIPEDIR_OUT,
190 .nentries = 256,
191 .nbytes_max = 256,
192 .flags = CE_ATTR_FLAGS,
193 .reserved = 0,
194 },
195
5e3dd157 196 /* NB: 50% of src nentries, since tx has 2 frags */
197
198 /* CE5: unused */
199 {
200 .pipenum = 5,
201 .pipedir = PIPEDIR_OUT,
202 .nentries = 32,
203 .nbytes_max = 2048,
204 .flags = CE_ATTR_FLAGS,
205 .reserved = 0,
206 },
207
208 /* CE6: Reserved for target autonomous hif_memcpy */
209 {
210 .pipenum = 6,
211 .pipedir = PIPEDIR_INOUT,
212 .nentries = 32,
213 .nbytes_max = 4096,
214 .flags = CE_ATTR_FLAGS,
215 .reserved = 0,
216 },
217
218 /* CE7 used only by Host */
219};
220
221static bool ath10k_pci_irq_pending(struct ath10k *ar)
222{
223 u32 cause;
224
225 /* Check if the shared legacy irq is for us */
226 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
227 PCIE_INTR_CAUSE_ADDRESS);
228 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
229 return true;
230
231 return false;
232}
233
234static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
235{
236 /* IMPORTANT: INTR_CLR register has to be set after
237 * INTR_ENABLE is set to 0, otherwise interrupt can not be
238 * really cleared. */
239 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
240 0);
241 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
242 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
243
244 /* IMPORTANT: this extra read transaction is required to
245 * flush the posted write buffer. */
246 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
247 PCIE_INTR_ENABLE_ADDRESS);
248}
249
250static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
251{
252 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
253 PCIE_INTR_ENABLE_ADDRESS,
254 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
255
256 /* IMPORTANT: this extra read transaction is required to
257 * flush the posted write buffer. */
258 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
259 PCIE_INTR_ENABLE_ADDRESS);
260}
261
262static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
263{
264 struct ath10k *ar = arg;
265 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
266
267 if (ar_pci->num_msi_intrs == 0) {
268 if (!ath10k_pci_irq_pending(ar))
269 return IRQ_NONE;
270
271 ath10k_pci_disable_and_clear_legacy_irq(ar);
272 }
273
274 tasklet_schedule(&ar_pci->early_irq_tasklet);
275
276 return IRQ_HANDLED;
277}
278
279static int ath10k_pci_request_early_irq(struct ath10k *ar)
280{
281 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
282 int ret;
283
284 /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
285 * interrupt from irq vector is triggered in all cases for FW
286 * indication/errors */
287 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
288 IRQF_SHARED, "ath10k_pci (early)", ar);
289 if (ret) {
290 ath10k_warn("failed to request early irq: %d\n", ret);
291 return ret;
292 }
293
294 return 0;
295}
296
297static void ath10k_pci_free_early_irq(struct ath10k *ar)
298{
299 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
300}
301
302/*
303 * Diagnostic read/write access is provided for startup/config/debug usage.
304 * Caller must guarantee proper alignment, when applicable, and single user
305 * at any moment.
306 */
307static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
308 int nbytes)
309{
310 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
311 int ret = 0;
312 u32 buf;
313 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
314 unsigned int id;
315 unsigned int flags;
2aa39115 316 struct ath10k_ce_pipe *ce_diag;
317 /* Host buffer address in CE space */
318 u32 ce_data;
319 dma_addr_t ce_data_base = 0;
320 void *data_buf = NULL;
321 int i;
322
323 /*
324 * This code cannot handle reads to non-memory space. Redirect to the
325 * register read fn but preserve the multi word read capability of
326 * this fn
327 */
328 if (address < DRAM_BASE_ADDRESS) {
329 if (!IS_ALIGNED(address, 4) ||
330 !IS_ALIGNED((unsigned long)data, 4))
331 return -EIO;
332
333 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
334 ar, address, (u32 *)data)) == 0)) {
335 nbytes -= sizeof(u32);
336 address += sizeof(u32);
337 data += sizeof(u32);
338 }
339 return ret;
340 }
341
342 ce_diag = ar_pci->ce_diag;
343
344 /*
345 * Allocate a temporary bounce buffer to hold caller's data
346 * to be DMA'ed from Target. This guarantees
347 * 1) 4-byte alignment
348 * 2) Buffer in DMA-able space
349 */
350 orig_nbytes = nbytes;
351 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
352 orig_nbytes,
353 &ce_data_base);
354
355 if (!data_buf) {
356 ret = -ENOMEM;
357 goto done;
358 }
359 memset(data_buf, 0, orig_nbytes);
360
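	/* Pull the data over in DIAG_TRANSFER_LIMIT-sized chunks: post the
	 * bounce buffer as a receive buffer on the diagnostic CE, ask the CE
	 * to copy from the (converted) target address into it, then wait for
	 * both the send and the receive completions. */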
361 remaining_bytes = orig_nbytes;
362 ce_data = ce_data_base;
363 while (remaining_bytes) {
364 nbytes = min_t(unsigned int, remaining_bytes,
365 DIAG_TRANSFER_LIMIT);
366
367 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
368 if (ret != 0)
369 goto done;
370
371 /* Request CE to send from Target(!) address to Host buffer */
372 /*
373 * The address supplied by the caller is in the
374 * Target CPU virtual address space.
375 *
376 * In order to use this address with the diagnostic CE,
377 * convert it from Target CPU virtual address space
378 * to CE address space
379 */
380 ath10k_pci_wake(ar);
381 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
382 address);
383 ath10k_pci_sleep(ar);
384
385 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
386 0);
387 if (ret)
388 goto done;
389
390 i = 0;
391 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
392 &completed_nbytes,
393 &id) != 0) {
394 mdelay(1);
395 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
396 ret = -EBUSY;
397 goto done;
398 }
399 }
400
401 if (nbytes != completed_nbytes) {
402 ret = -EIO;
403 goto done;
404 }
405
406 if (buf != (u32) address) {
407 ret = -EIO;
408 goto done;
409 }
410
411 i = 0;
412 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
413 &completed_nbytes,
414 &id, &flags) != 0) {
415 mdelay(1);
416
417 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
418 ret = -EBUSY;
419 goto done;
420 }
421 }
422
423 if (nbytes != completed_nbytes) {
424 ret = -EIO;
425 goto done;
426 }
427
428 if (buf != ce_data) {
429 ret = -EIO;
430 goto done;
431 }
432
433 remaining_bytes -= nbytes;
434 address += nbytes;
435 ce_data += nbytes;
436 }
437
438done:
439 if (ret == 0) {
440 /* Copy data from allocated DMA buf to caller's buf */
441 WARN_ON_ONCE(orig_nbytes & 3);
442 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
443 ((u32 *)data)[i] =
444 __le32_to_cpu(((__le32 *)data_buf)[i]);
445 }
446 } else
447 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
448 __func__, address);
449
450 if (data_buf)
451 pci_free_consistent(ar_pci->pdev, orig_nbytes,
452 data_buf, ce_data_base);
453
454 return ret;
455}
456
457/* Read 4-byte aligned data from Target memory or register */
458static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
459 u32 *data)
460{
461 /* Assume range doesn't cross this boundary */
462 if (address >= DRAM_BASE_ADDRESS)
463 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
464
465 ath10k_pci_wake(ar);
466 *data = ath10k_pci_read32(ar, address);
467 ath10k_pci_sleep(ar);
468 return 0;
469}
470
471static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
472 const void *data, int nbytes)
473{
474 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
475 int ret = 0;
476 u32 buf;
477 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
478 unsigned int id;
479 unsigned int flags;
2aa39115 480 struct ath10k_ce_pipe *ce_diag;
481 void *data_buf = NULL;
482 u32 ce_data; /* Host buffer address in CE space */
483 dma_addr_t ce_data_base = 0;
484 int i;
485
486 ce_diag = ar_pci->ce_diag;
487
488 /*
489 * Allocate a temporary bounce buffer to hold caller's data
490 * to be DMA'ed to Target. This guarantees
491 * 1) 4-byte alignment
492 * 2) Buffer in DMA-able space
493 */
494 orig_nbytes = nbytes;
495 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
496 orig_nbytes,
497 &ce_data_base);
498 if (!data_buf) {
499 ret = -ENOMEM;
500 goto done;
501 }
502
503 /* Copy caller's data to allocated DMA buf */
504 WARN_ON_ONCE(orig_nbytes & 3);
505 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
506 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
507
508 /*
509 * The address supplied by the caller is in the
510 * Target CPU virtual address space.
511 *
512 * In order to use this address with the diagnostic CE,
513 * convert it from
514 * Target CPU virtual address space
515 * to
516 * CE address space
517 */
518 ath10k_pci_wake(ar);
519 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
520 ath10k_pci_sleep(ar);
521
522 remaining_bytes = orig_nbytes;
523 ce_data = ce_data_base;
524 while (remaining_bytes) {
525 /* FIXME: check cast */
526 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
527
528 /* Set up to receive directly into Target(!) address */
529 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
530 if (ret != 0)
531 goto done;
532
533 /*
534 * Request CE to send caller-supplied data that
535 * was copied to bounce buffer to Target(!) address.
536 */
537 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
538 nbytes, 0, 0);
539 if (ret != 0)
540 goto done;
541
542 i = 0;
543 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
544 &completed_nbytes,
545 &id) != 0) {
546 mdelay(1);
547
548 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
549 ret = -EBUSY;
550 goto done;
551 }
552 }
553
554 if (nbytes != completed_nbytes) {
555 ret = -EIO;
556 goto done;
557 }
558
559 if (buf != ce_data) {
560 ret = -EIO;
561 goto done;
562 }
563
564 i = 0;
565 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
566 &completed_nbytes,
567 &id, &flags) != 0) {
568 mdelay(1);
569
570 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
571 ret = -EBUSY;
572 goto done;
573 }
574 }
575
576 if (nbytes != completed_nbytes) {
577 ret = -EIO;
578 goto done;
579 }
580
581 if (buf != address) {
582 ret = -EIO;
583 goto done;
584 }
585
586 remaining_bytes -= nbytes;
587 address += nbytes;
588 ce_data += nbytes;
589 }
590
591done:
592 if (data_buf) {
593 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
594 ce_data_base);
595 }
596
597 if (ret != 0)
598 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
599 address);
600
601 return ret;
602}
603
604/* Write 4B data to Target memory or register */
605static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
606 u32 data)
607{
608 /* Assume range doesn't cross this boundary */
609 if (address >= DRAM_BASE_ADDRESS)
610 return ath10k_pci_diag_write_mem(ar, address, &data,
611 sizeof(u32));
612
613 ath10k_pci_wake(ar);
614 ath10k_pci_write32(ar, address, data);
615 ath10k_pci_sleep(ar);
616 return 0;
617}
618
619static bool ath10k_pci_target_is_awake(struct ath10k *ar)
620{
621 void __iomem *mem = ath10k_pci_priv(ar)->mem;
622 u32 val;
623 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
624 RTC_STATE_ADDRESS);
625 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
626}
627
3aebe54b 628int ath10k_do_pci_wake(struct ath10k *ar)
629{
630 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
631 void __iomem *pci_addr = ar_pci->mem;
632 int tot_delay = 0;
633 int curr_delay = 5;
634
635 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
636 /* Force AWAKE */
637 iowrite32(PCIE_SOC_WAKE_V_MASK,
638 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
639 PCIE_SOC_WAKE_ADDRESS);
640 }
641 atomic_inc(&ar_pci->keep_awake_count);
642
643 if (ar_pci->verified_awake)
3aebe54b 644 return 0;
645
646 for (;;) {
647 if (ath10k_pci_target_is_awake(ar)) {
648 ar_pci->verified_awake = true;
3aebe54b 649 return 0;
650 }
651
652 if (tot_delay > PCIE_WAKE_TIMEOUT) {
653 ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
654 PCIE_WAKE_TIMEOUT,
5e3dd157 655 atomic_read(&ar_pci->keep_awake_count));
3aebe54b 656 return -ETIMEDOUT;
657 }
658
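		/* Poll with a growing delay: start at 5 us per iteration and
		 * back off to at most 50 us, until either the RTC state reads
		 * ON or the total wait exceeds PCIE_WAKE_TIMEOUT. */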
659 udelay(curr_delay);
660 tot_delay += curr_delay;
661
662 if (curr_delay < 50)
663 curr_delay += 5;
664 }
665}
666
667void ath10k_do_pci_sleep(struct ath10k *ar)
668{
669 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
670 void __iomem *pci_addr = ar_pci->mem;
671
672 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
673 /* Allow sleep */
674 ar_pci->verified_awake = false;
675 iowrite32(PCIE_SOC_WAKE_RESET,
676 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
677 PCIE_SOC_WAKE_ADDRESS);
678 }
679}
680
681/*
682 * FIXME: Handle OOM properly.
683 */
684static inline
87263e5b 685struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
686{
687 struct ath10k_pci_compl *compl = NULL;
688
689 spin_lock_bh(&pipe_info->pipe_lock);
690 if (list_empty(&pipe_info->compl_free)) {
691 ath10k_warn("Completion buffers are full\n");
692 goto exit;
693 }
694 compl = list_first_entry(&pipe_info->compl_free,
695 struct ath10k_pci_compl, list);
696 list_del(&compl->list);
697exit:
698 spin_unlock_bh(&pipe_info->pipe_lock);
699 return compl;
700}
701
702/* Called by lower (CE) layer when a send to Target completes. */
5440ce25 703static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
704{
705 struct ath10k *ar = ce_state->ar;
706 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 707 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
5e3dd157 708 struct ath10k_pci_compl *compl;
709 void *transfer_context;
710 u32 ce_data;
711 unsigned int nbytes;
712 unsigned int transfer_id;
5e3dd157 713
714 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
715 &ce_data, &nbytes,
716 &transfer_id) == 0) {
717 compl = get_free_compl(pipe_info);
718 if (!compl)
719 break;
720
f9d8fece 721 compl->state = ATH10K_PCI_COMPL_SEND;
722 compl->ce_state = ce_state;
723 compl->pipe_info = pipe_info;
aa5c1db4 724 compl->skb = transfer_context;
725 compl->nbytes = nbytes;
726 compl->transfer_id = transfer_id;
727 compl->flags = 0;
728
729 /*
730 * Add the completion to the processing queue.
731 */
732 spin_lock_bh(&ar_pci->compl_lock);
733 list_add_tail(&compl->list, &ar_pci->compl_process);
734 spin_unlock_bh(&ar_pci->compl_lock);
5440ce25 735 }
736
737 ath10k_pci_process_ce(ar);
738}
739
740/* Called by lower (CE) layer when data is received from the Target. */
5440ce25 741static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
742{
743 struct ath10k *ar = ce_state->ar;
744 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 745 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
746 struct ath10k_pci_compl *compl;
747 struct sk_buff *skb;
748 void *transfer_context;
749 u32 ce_data;
750 unsigned int nbytes;
751 unsigned int transfer_id;
752 unsigned int flags;
5e3dd157 753
754 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
755 &ce_data, &nbytes, &transfer_id,
756 &flags) == 0) {
757 compl = get_free_compl(pipe_info);
758 if (!compl)
759 break;
760
f9d8fece 761 compl->state = ATH10K_PCI_COMPL_RECV;
762 compl->ce_state = ce_state;
763 compl->pipe_info = pipe_info;
aa5c1db4 764 compl->skb = transfer_context;
765 compl->nbytes = nbytes;
766 compl->transfer_id = transfer_id;
767 compl->flags = flags;
768
769 skb = transfer_context;
770 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
771 skb->len + skb_tailroom(skb),
772 DMA_FROM_DEVICE);
773 /*
774 * Add the completion to the processing queue.
775 */
776 spin_lock_bh(&ar_pci->compl_lock);
777 list_add_tail(&compl->list, &ar_pci->compl_process);
778 spin_unlock_bh(&ar_pci->compl_lock);
5440ce25 779 }
780
781 ath10k_pci_process_ce(ar);
782}
783
784/* Send the first nbytes bytes of the buffer */
785static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
786 unsigned int transfer_id,
787 unsigned int bytes, struct sk_buff *nbuf)
788{
789 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
790 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 791 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
2aa39115 792 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
793 unsigned int len;
794 u32 flags = 0;
795 int ret;
796
797 len = min(bytes, nbuf->len);
798 bytes -= len;
799
800 if (len & 3)
801 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
802
803 ath10k_dbg(ATH10K_DBG_PCI,
804 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
805 nbuf->data, (unsigned long long) skb_cb->paddr,
806 nbuf->len, len);
807 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
808 "ath10k tx: data: ",
809 nbuf->data, nbuf->len);
810
811 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
812 flags);
5e3dd157 813 if (ret)
1d2b48d6 814 ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
815
816 return ret;
817}
818
819static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
820{
821 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3efcb3b4 822 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
823}
824
825static void ath10k_pci_hif_dump_area(struct ath10k *ar)
826{
827 u32 reg_dump_area = 0;
828 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
829 u32 host_addr;
830 int ret;
831 u32 i;
832
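	/* Crash handling: look up the dump location via the hi_failure_state
	 * host-interest item, read REG_DUMP_COUNT_QCA988X register values
	 * from target memory through the diagnostic window, print them, and
	 * finally schedule restart_work to recover the device. */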
833 ath10k_err("firmware crashed!\n");
834 ath10k_err("hardware name %s version 0x%x\n",
835 ar->hw_params.name, ar->target_version);
5ba88b39 836 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
837
838 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
839 ret = ath10k_pci_diag_read_mem(ar, host_addr,
840 &reg_dump_area, sizeof(u32));
841 if (ret) {
842 ath10k_err("failed to read FW dump area address: %d\n", ret);
843 return;
844 }
845
846 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
847
848 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
849 &reg_dump_values[0],
850 REG_DUMP_COUNT_QCA988X * sizeof(u32));
851 if (ret != 0) {
1d2b48d6 852 ath10k_err("failed to read FW dump area: %d\n", ret);
853 return;
854 }
855
856 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
857
858 ath10k_err("target Register Dump\n");
859 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
860 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
861 i,
862 reg_dump_values[i],
863 reg_dump_values[i + 1],
864 reg_dump_values[i + 2],
865 reg_dump_values[i + 3]);
affd3217 866
5e90de86 867 queue_work(ar->workqueue, &ar->restart_work);
868}
869
870static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
871 int force)
872{
873 if (!force) {
874 int resources;
875 /*
876 * Decide whether to actually poll for completions, or just
877 * wait for a later chance.
878 * If there seem to be plenty of resources left, then just wait
879 * since checking involves reading a CE register, which is a
880 * relatively expensive operation.
881 */
882 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
883
884 /*
885 * If at least 50% of the total resources are still available,
886 * don't bother checking again yet.
887 */
888 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
889 return;
890 }
891 ath10k_ce_per_engine_service(ar, pipe);
892}
893
894static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
895 struct ath10k_hif_cb *callbacks)
896{
897 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
898
899 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
900
901 memcpy(&ar_pci->msg_callbacks_current, callbacks,
902 sizeof(ar_pci->msg_callbacks_current));
903}
904
c80de12b 905static int ath10k_pci_alloc_compl(struct ath10k *ar)
906{
907 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 908 const struct ce_attr *attr;
87263e5b 909 struct ath10k_pci_pipe *pipe_info;
5e3dd157 910 struct ath10k_pci_compl *compl;
c80de12b 911 int i, pipe_num, completions;
912
913 spin_lock_init(&ar_pci->compl_lock);
914 INIT_LIST_HEAD(&ar_pci->compl_process);
915
fad6ed78 916 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
917 pipe_info = &ar_pci->pipe_info[pipe_num];
918
919 spin_lock_init(&pipe_info->pipe_lock);
920 INIT_LIST_HEAD(&pipe_info->compl_free);
921
922 /* Handle Diagnostic CE specially */
c80de12b 923 if (pipe_info->ce_hdl == ar_pci->ce_diag)
924 continue;
925
926 attr = &host_ce_config_wlan[pipe_num];
927 completions = 0;
928
c80de12b 929 if (attr->src_nentries)
5e3dd157 930 completions += attr->src_nentries;
5e3dd157 931
c80de12b 932 if (attr->dest_nentries)
5e3dd157 933 completions += attr->dest_nentries;
5e3dd157
KV
934
935 for (i = 0; i < completions; i++) {
ffe5daa8 936 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
937 if (!compl) {
938 ath10k_warn("No memory for completion state\n");
c80de12b 939 ath10k_pci_cleanup_ce(ar);
940 return -ENOMEM;
941 }
942
f9d8fece 943 compl->state = ATH10K_PCI_COMPL_FREE;
944 list_add_tail(&compl->list, &pipe_info->compl_free);
945 }
946 }
947
948 return 0;
949}
950
c80de12b 951static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
952{
953 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
954 const struct ce_attr *attr;
955 struct ath10k_pci_pipe *pipe_info;
956 int pipe_num, disable_interrupts;
5e3dd157 957
958 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
959 pipe_info = &ar_pci->pipe_info[pipe_num];
960
961 /* Handle Diagnostic CE specially */
962 if (pipe_info->ce_hdl == ar_pci->ce_diag)
963 continue;
964
965 attr = &host_ce_config_wlan[pipe_num];
966
967 if (attr->src_nentries) {
968 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
969 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
970 ath10k_pci_ce_send_done,
971 disable_interrupts);
972 }
973
974 if (attr->dest_nentries)
975 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
976 ath10k_pci_ce_recv_data);
977 }
978
979 return 0;
980}
981
96a9d0dc 982static void ath10k_pci_kill_tasklet(struct ath10k *ar)
983{
984 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 985 int i;
5e3dd157 986
5e3dd157 987 tasklet_kill(&ar_pci->intr_tq);
103d4f5e 988 tasklet_kill(&ar_pci->msi_fw_err);
ab977bd0 989 tasklet_kill(&ar_pci->early_irq_tasklet);
990
991 for (i = 0; i < CE_COUNT; i++)
992 tasklet_kill(&ar_pci->pipe_info[i].intr);
993}
994
995static void ath10k_pci_stop_ce(struct ath10k *ar)
996{
997 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
998 struct ath10k_pci_compl *compl;
999 struct sk_buff *skb;
1000
1001 /* Mark pending completions as aborted, so that upper layers free up
1002 * their associated resources */
1003 spin_lock_bh(&ar_pci->compl_lock);
1004 list_for_each_entry(compl, &ar_pci->compl_process, list) {
aa5c1db4 1005 skb = compl->skb;
1006 ATH10K_SKB_CB(skb)->is_aborted = true;
1007 }
1008 spin_unlock_bh(&ar_pci->compl_lock);
1009}
1010
1011static void ath10k_pci_cleanup_ce(struct ath10k *ar)
1012{
1013 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1014 struct ath10k_pci_compl *compl, *tmp;
87263e5b 1015 struct ath10k_pci_pipe *pipe_info;
1016 struct sk_buff *netbuf;
1017 int pipe_num;
1018
1019 /* Free pending completions. */
1020 spin_lock_bh(&ar_pci->compl_lock);
1021 if (!list_empty(&ar_pci->compl_process))
1022 ath10k_warn("pending completions still present! possible memory leaks.\n");
1023
1024 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
1025 list_del(&compl->list);
aa5c1db4 1026 netbuf = compl->skb;
1027 dev_kfree_skb_any(netbuf);
1028 kfree(compl);
1029 }
1030 spin_unlock_bh(&ar_pci->compl_lock);
1031
1032 /* Free unused completions for each pipe. */
fad6ed78 1033 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1034 pipe_info = &ar_pci->pipe_info[pipe_num];
1035
1036 spin_lock_bh(&pipe_info->pipe_lock);
1037 list_for_each_entry_safe(compl, tmp,
1038 &pipe_info->compl_free, list) {
1039 list_del(&compl->list);
1040 kfree(compl);
1041 }
1042 spin_unlock_bh(&pipe_info->pipe_lock);
1043 }
1044}
1045
1046static void ath10k_pci_process_ce(struct ath10k *ar)
1047{
1048 struct ath10k_pci *ar_pci = ar->hif.priv;
1049 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1050 struct ath10k_pci_compl *compl;
1051 struct sk_buff *skb;
1052 unsigned int nbytes;
1053 int ret, send_done = 0;
1054
1055 /* Upper layers aren't ready to handle tx/rx completions in parallel so
1056 * we must serialize all completion processing. */
1057
1058 spin_lock_bh(&ar_pci->compl_lock);
1059 if (ar_pci->compl_processing) {
1060 spin_unlock_bh(&ar_pci->compl_lock);
1061 return;
1062 }
1063 ar_pci->compl_processing = true;
1064 spin_unlock_bh(&ar_pci->compl_lock);
1065
1066 for (;;) {
1067 spin_lock_bh(&ar_pci->compl_lock);
1068 if (list_empty(&ar_pci->compl_process)) {
1069 spin_unlock_bh(&ar_pci->compl_lock);
1070 break;
1071 }
1072 compl = list_first_entry(&ar_pci->compl_process,
1073 struct ath10k_pci_compl, list);
1074 list_del(&compl->list);
1075 spin_unlock_bh(&ar_pci->compl_lock);
1076
1077 switch (compl->state) {
1078 case ATH10K_PCI_COMPL_SEND:
5e3dd157 1079 cb->tx_completion(ar,
aa5c1db4 1080 compl->skb,
1081 compl->transfer_id);
1082 send_done = 1;
1083 break;
1084 case ATH10K_PCI_COMPL_RECV:
1085 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1086 if (ret) {
1087 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1088 compl->pipe_info->pipe_num, ret);
1089 break;
1090 }
1091
aa5c1db4 1092 skb = compl->skb;
1093 nbytes = compl->nbytes;
1094
1095 ath10k_dbg(ATH10K_DBG_PCI,
1096 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
1097 skb, nbytes);
1098 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1099 "ath10k rx: ", skb->data, nbytes);
1100
1101 if (skb->len + skb_tailroom(skb) >= nbytes) {
1102 skb_trim(skb, 0);
1103 skb_put(skb, nbytes);
1104 cb->rx_completion(ar, skb,
1105 compl->pipe_info->pipe_num);
1106 } else {
1107 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
1108 nbytes,
1109 skb->len + skb_tailroom(skb));
1110 }
1111 break;
1112 case ATH10K_PCI_COMPL_FREE:
1113 ath10k_warn("free completion cannot be processed\n");
1114 break;
1115 default:
1116 ath10k_warn("invalid completion state (%d)\n",
1117 compl->state);
1118 break;
1119 }
1120
f9d8fece 1121 compl->state = ATH10K_PCI_COMPL_FREE;
1122
1123 /*
1124 * Add completion back to the pipe's free list.
1125 */
1126 spin_lock_bh(&compl->pipe_info->pipe_lock);
1127 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1128 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1129 }
1130
1131 spin_lock_bh(&ar_pci->compl_lock);
1132 ar_pci->compl_processing = false;
1133 spin_unlock_bh(&ar_pci->compl_lock);
1134}
1135
1136/* TODO - temporary mapping while we have too few CE's */
1137static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1138 u16 service_id, u8 *ul_pipe,
1139 u8 *dl_pipe, int *ul_is_polled,
1140 int *dl_is_polled)
1141{
1142 int ret = 0;
1143
1144 /* polling for received messages not supported */
1145 *dl_is_polled = 0;
1146
1147 switch (service_id) {
1148 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1149 /*
1150 * Host->target HTT gets its own pipe, so it can be polled
1151 * while other pipes are interrupt driven.
1152 */
1153 *ul_pipe = 4;
1154 /*
1155 * Use the same target->host pipe for HTC ctrl, HTC raw
1156 * streams, and HTT.
1157 */
1158 *dl_pipe = 1;
1159 break;
1160
1161 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1162 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
1163 /*
1164 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
1165 * HTC_CTRL_RSVD_SVC could share the same pipe as the
1166 * WMI services. So, if another CE is needed, change
1167 * this to *ul_pipe = 3, which frees up CE 0.
1168 */
1169 /* *ul_pipe = 3; */
1170 *ul_pipe = 0;
1171 *dl_pipe = 1;
1172 break;
1173
1174 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1175 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1176 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1177 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1178
1179 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1180 *ul_pipe = 3;
1181 *dl_pipe = 2;
1182 break;
1183
1184 /* pipe 5 unused */
1185 /* pipe 6 reserved */
1186 /* pipe 7 reserved */
1187
1188 default:
1189 ret = -1;
1190 break;
1191 }
1192 *ul_is_polled =
1193 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1194
1195 return ret;
1196}
1197
1198static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1199 u8 *ul_pipe, u8 *dl_pipe)
1200{
1201 int ul_is_polled, dl_is_polled;
1202
1203 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1204 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1205 ul_pipe,
1206 dl_pipe,
1207 &ul_is_polled,
1208 &dl_is_polled);
1209}
1210
87263e5b 1211static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1212 int num)
1213{
1214 struct ath10k *ar = pipe_info->hif_ce_state;
1215 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2aa39115 1216 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1217 struct sk_buff *skb;
1218 dma_addr_t ce_data;
1219 int i, ret = 0;
1220
1221 if (pipe_info->buf_sz == 0)
1222 return 0;
1223
1224 for (i = 0; i < num; i++) {
1225 skb = dev_alloc_skb(pipe_info->buf_sz);
1226 if (!skb) {
1d2b48d6 1227 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1228 num);
1229 ret = -ENOMEM;
1230 goto err;
1231 }
1232
1233 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1234
1235 ce_data = dma_map_single(ar->dev, skb->data,
1236 skb->len + skb_tailroom(skb),
1237 DMA_FROM_DEVICE);
1238
1239 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1d2b48d6 1240 ath10k_warn("failed to DMA map sk_buff\n");
1241 dev_kfree_skb_any(skb);
1242 ret = -EIO;
1243 goto err;
1244 }
1245
1246 ATH10K_SKB_CB(skb)->paddr = ce_data;
1247
1248 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1249 pipe_info->buf_sz,
1250 PCI_DMA_FROMDEVICE);
1251
1252 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1253 ce_data);
1254 if (ret) {
1d2b48d6 1255 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1256 num, ret);
1257 goto err;
1258 }
1259 }
1260
1261 return ret;
1262
1263err:
1264 ath10k_pci_rx_pipe_cleanup(pipe_info);
1265 return ret;
1266}
1267
1268static int ath10k_pci_post_rx(struct ath10k *ar)
1269{
1270 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 1271 struct ath10k_pci_pipe *pipe_info;
1272 const struct ce_attr *attr;
1273 int pipe_num, ret = 0;
1274
fad6ed78 1275 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1276 pipe_info = &ar_pci->pipe_info[pipe_num];
1277 attr = &host_ce_config_wlan[pipe_num];
1278
1279 if (attr->dest_nentries == 0)
1280 continue;
1281
1282 ret = ath10k_pci_post_rx_pipe(pipe_info,
1283 attr->dest_nentries - 1);
1284 if (ret) {
1285 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1286 pipe_num, ret);
1287
1288 for (; pipe_num >= 0; pipe_num--) {
1289 pipe_info = &ar_pci->pipe_info[pipe_num];
1290 ath10k_pci_rx_pipe_cleanup(pipe_info);
1291 }
1292 return ret;
1293 }
1294 }
1295
1296 return 0;
1297}
1298
1299static int ath10k_pci_hif_start(struct ath10k *ar)
1300{
1301 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ab977bd0 1302 int ret, ret_early;
5e3dd157 1303
1304 ath10k_pci_free_early_irq(ar);
1305 ath10k_pci_kill_tasklet(ar);
5e3dd157 1306
c80de12b 1307 ret = ath10k_pci_alloc_compl(ar);
5e3dd157 1308 if (ret) {
c80de12b 1309 ath10k_warn("failed to allocate CE completions: %d\n", ret);
ab977bd0 1310 goto err_early_irq;
1311 }
1312
1313 ret = ath10k_pci_request_irq(ar);
1314 if (ret) {
 1315 ath10k_warn("failed to request irqs: %d\n",
1316 ret);
1317 goto err_free_compl;
1318 }
1319
1320 ret = ath10k_pci_setup_ce_irq(ar);
1321 if (ret) {
1322 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
5d1aa946 1323 goto err_stop;
1324 }
1325
1326 /* Post buffers once to start things off. */
1327 ret = ath10k_pci_post_rx(ar);
1328 if (ret) {
1329 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1330 ret);
5d1aa946 1331 goto err_stop;
1332 }
1333
1334 ar_pci->started = 1;
1335 return 0;
c80de12b 1336
1337err_stop:
1338 ath10k_ce_disable_interrupts(ar);
1339 ath10k_pci_free_irq(ar);
1340 ath10k_pci_kill_tasklet(ar);
1341 ath10k_pci_stop_ce(ar);
1342 ath10k_pci_process_ce(ar);
1343err_free_compl:
1344 ath10k_pci_cleanup_ce(ar);
1345err_early_irq:
1346 /* Though there should be no interrupts (device was reset)
1347 * power_down() expects the early IRQ to be installed as per the
1348 * driver lifecycle. */
1349 ret_early = ath10k_pci_request_early_irq(ar);
1350 if (ret_early)
1351 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1352
c80de12b 1353 return ret;
1354}
1355
87263e5b 1356static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1357{
1358 struct ath10k *ar;
1359 struct ath10k_pci *ar_pci;
2aa39115 1360 struct ath10k_ce_pipe *ce_hdl;
1361 u32 buf_sz;
1362 struct sk_buff *netbuf;
1363 u32 ce_data;
1364
1365 buf_sz = pipe_info->buf_sz;
1366
1367 /* Unused Copy Engine */
1368 if (buf_sz == 0)
1369 return;
1370
1371 ar = pipe_info->hif_ce_state;
1372 ar_pci = ath10k_pci_priv(ar);
1373
1374 if (!ar_pci->started)
1375 return;
1376
1377 ce_hdl = pipe_info->ce_hdl;
1378
1379 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1380 &ce_data) == 0) {
1381 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1382 netbuf->len + skb_tailroom(netbuf),
1383 DMA_FROM_DEVICE);
1384 dev_kfree_skb_any(netbuf);
1385 }
1386}
1387
87263e5b 1388static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1389{
1390 struct ath10k *ar;
1391 struct ath10k_pci *ar_pci;
2aa39115 1392 struct ath10k_ce_pipe *ce_hdl;
1393 struct sk_buff *netbuf;
1394 u32 ce_data;
1395 unsigned int nbytes;
1396 unsigned int id;
1397 u32 buf_sz;
1398
1399 buf_sz = pipe_info->buf_sz;
1400
1401 /* Unused Copy Engine */
1402 if (buf_sz == 0)
1403 return;
1404
1405 ar = pipe_info->hif_ce_state;
1406 ar_pci = ath10k_pci_priv(ar);
1407
1408 if (!ar_pci->started)
1409 return;
1410
1411 ce_hdl = pipe_info->ce_hdl;
1412
1413 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1414 &ce_data, &nbytes, &id) == 0) {
 1415 /*
 1416 * Indicate the completion to the higher layer to free
 1417 * the buffer.
 1418 */
1419
1420 if (!netbuf) {
1421 ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1422 ce_hdl->id);
1423 continue;
1424 }
1425
1426 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1427 ar_pci->msg_callbacks_current.tx_completion(ar,
1428 netbuf,
1429 id);
1430 }
1431}
1432
1433/*
1434 * Cleanup residual buffers for device shutdown:
1435 * buffers that were enqueued for receive
1436 * buffers that were to be sent
1437 * Note: Buffers that had completed but which were
1438 * not yet processed are on a completion queue. They
1439 * are handled when the completion thread shuts down.
1440 */
1441static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1442{
1443 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1444 int pipe_num;
1445
fad6ed78 1446 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
87263e5b 1447 struct ath10k_pci_pipe *pipe_info;
1448
1449 pipe_info = &ar_pci->pipe_info[pipe_num];
1450 ath10k_pci_rx_pipe_cleanup(pipe_info);
1451 ath10k_pci_tx_pipe_cleanup(pipe_info);
1452 }
1453}
1454
1455static void ath10k_pci_ce_deinit(struct ath10k *ar)
1456{
1457 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 1458 struct ath10k_pci_pipe *pipe_info;
1459 int pipe_num;
1460
fad6ed78 1461 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1462 pipe_info = &ar_pci->pipe_info[pipe_num];
1463 if (pipe_info->ce_hdl) {
1464 ath10k_ce_deinit(pipe_info->ce_hdl);
1465 pipe_info->ce_hdl = NULL;
1466 pipe_info->buf_sz = 0;
1467 }
1468 }
1469}
1470
1471static void ath10k_pci_hif_stop(struct ath10k *ar)
1472{
32270b61 1473 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5d1aa946 1474 int ret;
32270b61 1475
1476 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1477
1478 ret = ath10k_ce_disable_interrupts(ar);
1479 if (ret)
1480 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
32270b61 1481
1482 ath10k_pci_free_irq(ar);
1483 ath10k_pci_kill_tasklet(ar);
1484 ath10k_pci_stop_ce(ar);
1485
1486 ret = ath10k_pci_request_early_irq(ar);
1487 if (ret)
1488 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1489
1490 /* At this point, asynchronous threads are stopped, the target should
1491 * not DMA nor interrupt. We process the leftovers and then free
1492 * everything else up. */
1493
1494 ath10k_pci_process_ce(ar);
1495 ath10k_pci_cleanup_ce(ar);
1496 ath10k_pci_buffer_cleanup(ar);
32270b61 1497
 1498 /* Make sure the device won't access any structures on the host by
1499 * resetting it. The device was fed with PCI CE ringbuffer
1500 * configuration during init. If ringbuffers are freed and the device
1501 * were to access them this could lead to memory corruption on the
1502 * host. */
1503 ath10k_pci_device_reset(ar);
1504
32270b61 1505 ar_pci->started = 0;
1506}
1507
1508static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1509 void *req, u32 req_len,
1510 void *resp, u32 *resp_len)
1511{
1512 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1513 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1514 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1515 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1516 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1517 dma_addr_t req_paddr = 0;
1518 dma_addr_t resp_paddr = 0;
1519 struct bmi_xfer xfer = {};
1520 void *treq, *tresp = NULL;
1521 int ret = 0;
1522
1523 might_sleep();
1524
1525 if (resp && !resp_len)
1526 return -EINVAL;
1527
1528 if (resp && resp_len && *resp_len == 0)
1529 return -EINVAL;
1530
1531 treq = kmemdup(req, req_len, GFP_KERNEL);
1532 if (!treq)
1533 return -ENOMEM;
1534
1535 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1536 ret = dma_mapping_error(ar->dev, req_paddr);
1537 if (ret)
1538 goto err_dma;
1539
1540 if (resp && resp_len) {
1541 tresp = kzalloc(*resp_len, GFP_KERNEL);
1542 if (!tresp) {
1543 ret = -ENOMEM;
1544 goto err_req;
1545 }
1546
1547 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1548 DMA_FROM_DEVICE);
1549 ret = dma_mapping_error(ar->dev, resp_paddr);
1550 if (ret)
1551 goto err_req;
1552
1553 xfer.wait_for_resp = true;
1554 xfer.resp_len = 0;
1555
1556 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1557 }
1558
1559 init_completion(&xfer.done);
1560
1561 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1562 if (ret)
1563 goto err_resp;
1564
1565 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1566 if (ret) {
1567 u32 unused_buffer;
1568 unsigned int unused_nbytes;
1569 unsigned int unused_id;
1570
1571 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1572 &unused_nbytes, &unused_id);
1573 } else {
1574 /* non-zero means we did not time out */
1575 ret = 0;
1576 }
1577
1578err_resp:
1579 if (resp) {
1580 u32 unused_buffer;
1581
1582 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1583 dma_unmap_single(ar->dev, resp_paddr,
1584 *resp_len, DMA_FROM_DEVICE);
1585 }
1586err_req:
1587 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1588
1589 if (ret == 0 && resp_len) {
1590 *resp_len = min(*resp_len, xfer.resp_len);
1591 memcpy(resp, tresp, xfer.resp_len);
1592 }
1593err_dma:
1594 kfree(treq);
1595 kfree(tresp);
1596
1597 return ret;
1598}
1599
5440ce25 1600static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
5e3dd157 1601{
1602 struct bmi_xfer *xfer;
1603 u32 ce_data;
1604 unsigned int nbytes;
1605 unsigned int transfer_id;
1606
1607 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1608 &nbytes, &transfer_id))
1609 return;
1610
1611 if (xfer->wait_for_resp)
1612 return;
1613
1614 complete(&xfer->done);
1615}
1616
5440ce25 1617static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
5e3dd157 1618{
1619 struct bmi_xfer *xfer;
1620 u32 ce_data;
1621 unsigned int nbytes;
1622 unsigned int transfer_id;
1623 unsigned int flags;
1624
1625 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1626 &nbytes, &transfer_id, &flags))
1627 return;
1628
1629 if (!xfer->wait_for_resp) {
1630 ath10k_warn("unexpected: BMI data received; ignoring\n");
1631 return;
1632 }
1633
1634 xfer->resp_len = nbytes;
1635 complete(&xfer->done);
1636}
1637
1638static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1639 struct ath10k_ce_pipe *rx_pipe,
1640 struct bmi_xfer *xfer)
1641{
1642 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1643
1644 while (time_before_eq(jiffies, timeout)) {
1645 ath10k_pci_bmi_send_done(tx_pipe);
1646 ath10k_pci_bmi_recv_data(rx_pipe);
1647
1648 if (completion_done(&xfer->done))
1649 return 0;
1650
1651 schedule();
1652 }
1653
1654 return -ETIMEDOUT;
1655}
1656
1657/*
1658 * Map from service/endpoint to Copy Engine.
1659 * This table is derived from the CE_PCI TABLE, above.
1660 * It is passed to the Target at startup for use by firmware.
1661 */
1662static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1663 {
1664 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1665 PIPEDIR_OUT, /* out = UL = host -> target */
1666 3,
1667 },
1668 {
1669 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1670 PIPEDIR_IN, /* in = DL = target -> host */
1671 2,
1672 },
1673 {
1674 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1675 PIPEDIR_OUT, /* out = UL = host -> target */
1676 3,
1677 },
1678 {
1679 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1680 PIPEDIR_IN, /* in = DL = target -> host */
1681 2,
1682 },
1683 {
1684 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1685 PIPEDIR_OUT, /* out = UL = host -> target */
1686 3,
1687 },
1688 {
1689 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1690 PIPEDIR_IN, /* in = DL = target -> host */
1691 2,
1692 },
1693 {
1694 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1695 PIPEDIR_OUT, /* out = UL = host -> target */
1696 3,
1697 },
1698 {
1699 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1700 PIPEDIR_IN, /* in = DL = target -> host */
1701 2,
1702 },
1703 {
1704 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1705 PIPEDIR_OUT, /* out = UL = host -> target */
1706 3,
1707 },
1708 {
1709 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1710 PIPEDIR_IN, /* in = DL = target -> host */
1711 2,
1712 },
1713 {
1714 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1715 PIPEDIR_OUT, /* out = UL = host -> target */
1716 0, /* could be moved to 3 (share with WMI) */
1717 },
1718 {
1719 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1720 PIPEDIR_IN, /* in = DL = target -> host */
1721 1,
1722 },
1723 {
1724 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1725 PIPEDIR_OUT, /* out = UL = host -> target */
1726 0,
1727 },
1728 {
1729 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1730 PIPEDIR_IN, /* in = DL = target -> host */
1731 1,
1732 },
1733 {
1734 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1735 PIPEDIR_OUT, /* out = UL = host -> target */
1736 4,
1737 },
1738 {
1739 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1740 PIPEDIR_IN, /* in = DL = target -> host */
1741 1,
1742 },
1743
1744 /* (Additions here) */
1745
1746 { /* Must be last */
1747 0,
1748 0,
1749 0,
1750 },
1751};
1752
1753/*
1754 * Send an interrupt to the device to wake up the Target CPU
1755 * so it has an opportunity to notice any changed state.
1756 */
1757static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1758{
1759 int ret;
1760 u32 core_ctrl;
1761
1762 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1763 CORE_CTRL_ADDRESS,
1764 &core_ctrl);
1765 if (ret) {
1d2b48d6 1766 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1767 return ret;
1768 }
1769
1770 /* A_INUM_FIRMWARE interrupt to Target CPU */
1771 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1772
1773 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1774 CORE_CTRL_ADDRESS,
1775 core_ctrl);
1776 if (ret) {
1777 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1778 ret);
1779 return ret;
1780 }
5e3dd157 1781
1d2b48d6 1782 return 0;
1783}
1784
1785static int ath10k_pci_init_config(struct ath10k *ar)
1786{
1787 u32 interconnect_targ_addr;
1788 u32 pcie_state_targ_addr = 0;
1789 u32 pipe_cfg_targ_addr = 0;
1790 u32 svc_to_pipe_map = 0;
1791 u32 pcie_config_flags = 0;
1792 u32 ealloc_value;
1793 u32 ealloc_targ_addr;
1794 u32 flag2_value;
1795 u32 flag2_targ_addr;
1796 int ret = 0;
1797
1798 /* Download to Target the CE Config and the service-to-CE map */
1799 interconnect_targ_addr =
1800 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1801
1802 /* Supply Target-side CE configuration */
1803 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1804 &pcie_state_targ_addr);
1805 if (ret != 0) {
1806 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1807 return ret;
1808 }
1809
1810 if (pcie_state_targ_addr == 0) {
1811 ret = -EIO;
1812 ath10k_err("Invalid pcie state addr\n");
1813 return ret;
1814 }
1815
1816 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1817 offsetof(struct pcie_state,
1818 pipe_cfg_addr),
1819 &pipe_cfg_targ_addr);
1820 if (ret != 0) {
1821 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1822 return ret;
1823 }
1824
1825 if (pipe_cfg_targ_addr == 0) {
1826 ret = -EIO;
1827 ath10k_err("Invalid pipe cfg addr\n");
1828 return ret;
1829 }
1830
1831 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1832 target_ce_config_wlan,
1833 sizeof(target_ce_config_wlan));
1834
1835 if (ret != 0) {
1836 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1837 return ret;
1838 }
1839
1840 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1841 offsetof(struct pcie_state,
1842 svc_to_pipe_map),
1843 &svc_to_pipe_map);
1844 if (ret != 0) {
1845 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1846 return ret;
1847 }
1848
1849 if (svc_to_pipe_map == 0) {
1850 ret = -EIO;
1851 ath10k_err("Invalid svc_to_pipe map\n");
1852 return ret;
1853 }
1854
1855 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1856 target_service_to_ce_map_wlan,
1857 sizeof(target_service_to_ce_map_wlan));
1858 if (ret != 0) {
1859 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1860 return ret;
1861 }
1862
1863 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1864 offsetof(struct pcie_state,
1865 config_flags),
1866 &pcie_config_flags);
1867 if (ret != 0) {
1868 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1869 return ret;
1870 }
1871
1872 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1873
1874 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1875 offsetof(struct pcie_state, config_flags),
1876 &pcie_config_flags,
1877 sizeof(pcie_config_flags));
1878 if (ret != 0) {
1879 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1880 return ret;
1881 }
1882
1883 /* configure early allocation */
1884 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1885
1886 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1887 if (ret != 0) {
1888 ath10k_err("Faile to get early alloc val: %d\n", ret);
1889 return ret;
1890 }
1891
1892 /* first bank is switched to IRAM */
1893 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1894 HI_EARLY_ALLOC_MAGIC_MASK);
1895 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1896 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1897
1898 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1899 if (ret != 0) {
1900 ath10k_err("Failed to set early alloc val: %d\n", ret);
1901 return ret;
1902 }
1903
1904 /* Tell Target to proceed with initialization */
1905 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1906
1907 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1908 if (ret != 0) {
1909 ath10k_err("Failed to get option val: %d\n", ret);
1910 return ret;
1911 }
1912
1913 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1914
1915 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1916 if (ret != 0) {
1917 ath10k_err("Failed to set option val: %d\n", ret);
1918 return ret;
1919 }
1920
1921 return 0;
1922}
1923
1924
1925
1926static int ath10k_pci_ce_init(struct ath10k *ar)
1927{
1928 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 1929 struct ath10k_pci_pipe *pipe_info;
1930 const struct ce_attr *attr;
1931 int pipe_num;
1932
fad6ed78 1933 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1934 pipe_info = &ar_pci->pipe_info[pipe_num];
1935 pipe_info->pipe_num = pipe_num;
1936 pipe_info->hif_ce_state = ar;
1937 attr = &host_ce_config_wlan[pipe_num];
1938
1939 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1940 if (pipe_info->ce_hdl == NULL) {
1d2b48d6 1941 ath10k_err("failed to initialize CE for pipe: %d\n",
1942 pipe_num);
1943
1944 /* It is safe to call it here. It checks if ce_hdl is
1945 * valid for each pipe */
1946 ath10k_pci_ce_deinit(ar);
1947 return -1;
1948 }
1949
fad6ed78 1950 if (pipe_num == CE_COUNT - 1) {
1951 /*
1952 * Reserve the ultimate CE for
1953 * diagnostic Window support
1954 */
fad6ed78 1955 ar_pci->ce_diag = pipe_info->ce_hdl;
1956 continue;
1957 }
1958
1959 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1960 }
1961
5e3dd157
KV
1962 return 0;
1963}
1964
1965static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1966{
1967 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1968 u32 fw_indicator_address, fw_indicator;
1969
1970 ath10k_pci_wake(ar);
1971
1972 fw_indicator_address = ar_pci->fw_indicator_address;
1973 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1974
1975 if (fw_indicator & FW_IND_EVENT_PENDING) {
1976 /* ACK: clear Target-side pending event */
1977 ath10k_pci_write32(ar, fw_indicator_address,
1978 fw_indicator & ~FW_IND_EVENT_PENDING);
1979
1980 if (ar_pci->started) {
1981 ath10k_pci_hif_dump_area(ar);
1982 } else {
1983 /*
1984 * Probable Target failure before we're prepared
1985 * to handle it. Generally unexpected.
1986 */
1987 ath10k_warn("early firmware event indicated\n");
1988 }
1989 }
1990
1991 ath10k_pci_sleep(ar);
1992}
1993
1994static int ath10k_pci_hif_power_up(struct ath10k *ar)
1995{
8cc8df90 1996 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
95cbb6a8 1997 const char *irq_mode;
1998 int ret;
1999
2000 /*
2001 * Bring the target up cleanly.
2002 *
2003 * The target may be in an undefined state with an AUX-powered Target
2004 * and a Host in WoW mode. If the Host crashes, loses power, or is
2005 * restarted (without unloading the driver) then the Target is left
2006 * (aux) powered and running. On a subsequent driver load, the Target
2007 * is in an unexpected state. We try to catch that here in order to
2008 * reset the Target and retry the probe.
2009 */
2010 ret = ath10k_pci_device_reset(ar);
2011 if (ret) {
2012 ath10k_err("failed to reset target: %d\n", ret);
98563d5a 2013 goto err;
5b2589fc 2014 }
8c5c5368 2015
8cc8df90 2016 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
8c5c5368 2017 /* Force AWAKE forever */
8c5c5368 2018 ath10k_do_pci_wake(ar);
2019
2020 ret = ath10k_pci_ce_init(ar);
2021 if (ret) {
2022 ath10k_err("failed to initialize CE: %d\n", ret);
8c5c5368 2023 goto err_ps;
1d2b48d6 2024 }
8c5c5368 2025
2026 ret = ath10k_ce_disable_interrupts(ar);
2027 if (ret) {
2028 ath10k_err("failed to disable CE interrupts: %d\n", ret);
8c5c5368 2029 goto err_ce;
98563d5a 2030 }
8c5c5368 2031
fc15ca13 2032 ret = ath10k_pci_init_irq(ar);
8c5c5368 2033 if (ret) {
fc15ca13 2034 ath10k_err("failed to init irqs: %d\n", ret);
2035 goto err_ce;
2036 }
2037
2038 ret = ath10k_pci_request_early_irq(ar);
2039 if (ret) {
2040 ath10k_err("failed to request early irq: %d\n", ret);
2041 goto err_deinit_irq;
2042 }
2043
2044 ret = ath10k_pci_wait_for_target_init(ar);
2045 if (ret) {
2046 ath10k_err("failed to wait for target to init: %d\n", ret);
ab977bd0 2047 goto err_free_early_irq;
2048 }
2049
2050 ret = ath10k_pci_init_config(ar);
2051 if (ret) {
2052 ath10k_err("failed to setup init config: %d\n", ret);
ab977bd0 2053 goto err_free_early_irq;
98563d5a 2054 }
2055
2056 ret = ath10k_pci_wake_target_cpu(ar);
2057 if (ret) {
1d2b48d6 2058 ath10k_err("could not wake up target CPU: %d\n", ret);
ab977bd0 2059 goto err_free_early_irq;
2060 }
2061
2062 if (ar_pci->num_msi_intrs > 1)
2063 irq_mode = "MSI-X";
2064 else if (ar_pci->num_msi_intrs == 1)
2065 irq_mode = "MSI";
2066 else
2067 irq_mode = "legacy";
2068
2069 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2070 ath10k_info("pci irq %s\n", irq_mode);
95cbb6a8 2071
2072 return 0;
2073
2074err_free_early_irq:
2075 ath10k_pci_free_early_irq(ar);
2076err_deinit_irq:
2077 ath10k_pci_deinit_irq(ar);
2078err_ce:
2079 ath10k_pci_ce_deinit(ar);
5d1aa946 2080 ath10k_pci_device_reset(ar);
8c5c5368 2081err_ps:
8cc8df90 2082 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2083 ath10k_do_pci_sleep(ar);
2084err:
2085 return ret;
2086}
2087
2088static void ath10k_pci_hif_power_down(struct ath10k *ar)
2089{
2090 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2091
2092 ath10k_pci_free_early_irq(ar);
2093 ath10k_pci_kill_tasklet(ar);
fc15ca13 2094 ath10k_pci_deinit_irq(ar);
6a42a47e 2095 ath10k_pci_device_reset(ar);
8cc8df90 2096
8c5c5368 2097 ath10k_pci_ce_deinit(ar);
8cc8df90 2098 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2099 ath10k_do_pci_sleep(ar);
2100}
2101
2102#ifdef CONFIG_PM
2103
2104#define ATH10K_PCI_PM_CONTROL 0x44
2105
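/*
 * 0x44 is presumably the offset of the PCI power-management
 * control/status register (PMCSR) on this chip: writing 0x3 into its
 * low bits below requests the D3hot power state, and clearing them on
 * resume returns the device to D0.
 */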
2106static int ath10k_pci_hif_suspend(struct ath10k *ar)
2107{
2108 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2109 struct pci_dev *pdev = ar_pci->pdev;
2110 u32 val;
2111
2112 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2113
2114 if ((val & 0x000000ff) != 0x3) {
2115 pci_save_state(pdev);
2116 pci_disable_device(pdev);
2117 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2118 (val & 0xffffff00) | 0x03);
2119 }
2120
2121 return 0;
2122}
2123
2124static int ath10k_pci_hif_resume(struct ath10k *ar)
2125{
2126 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2127 struct pci_dev *pdev = ar_pci->pdev;
2128 u32 val;
2129
2130 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2131
2132 if ((val & 0x000000ff) != 0) {
2133 pci_restore_state(pdev);
2134 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2135 val & 0xffffff00);
2136 /*
2137 * Suspend/Resume resets the PCI configuration space,
2138 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2139 * to keep PCI Tx retries from interfering with C3 CPU state
2140 */
2141 pci_read_config_dword(pdev, 0x40, &val);
2142
2143 if ((val & 0x0000ff00) != 0)
2144 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2145 }
2146
2147 return 0;
2148}
2149#endif
2150
2151static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2152 .send_head = ath10k_pci_hif_send_head,
2153 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2154 .start = ath10k_pci_hif_start,
2155 .stop = ath10k_pci_hif_stop,
2156 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2157 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2158 .send_complete_check = ath10k_pci_hif_send_complete_check,
e799bbff 2159 .set_callbacks = ath10k_pci_hif_set_callbacks,
5e3dd157 2160 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2161 .power_up = ath10k_pci_hif_power_up,
2162 .power_down = ath10k_pci_hif_power_down,
2163#ifdef CONFIG_PM
2164 .suspend = ath10k_pci_hif_suspend,
2165 .resume = ath10k_pci_hif_resume,
2166#endif
2167};
2168
2169static void ath10k_pci_ce_tasklet(unsigned long ptr)
2170{
87263e5b 2171 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2172 struct ath10k_pci *ar_pci = pipe->ar_pci;
2173
2174 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2175}
2176
2177static void ath10k_msi_err_tasklet(unsigned long data)
2178{
2179 struct ath10k *ar = (struct ath10k *)data;
2180
2181 ath10k_pci_fw_interrupt_handler(ar);
2182}
2183
2184/*
2185 * Handler for a per-engine interrupt on a PARTICULAR CE.
2186 * This is used in cases where each CE has a private MSI interrupt.
2187 */
2188static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2189{
2190 struct ath10k *ar = arg;
2191 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2192 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2193
e5742672 2194 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2195 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2196 return IRQ_HANDLED;
2197 }
2198
2199 /*
2200 * NOTE: We are able to derive ce_id from irq because we
2201 * use a one-to-one mapping for CEs 0..5.
2202 * CEs 6 & 7 do not use interrupts at all.
2203 *
2204 * This mapping must be kept in sync with the mapping
2205 * used by firmware.
2206 */
2207 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2208 return IRQ_HANDLED;
2209}
2210
2211static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2212{
2213 struct ath10k *ar = arg;
2214 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2215
2216 tasklet_schedule(&ar_pci->msi_fw_err);
2217 return IRQ_HANDLED;
2218}
2219
2220/*
2221 * Top-level interrupt handler for all PCI interrupts from a Target.
2222 * When a block of MSI interrupts is allocated, this top-level handler
2223 * is not used; instead, we directly call the correct sub-handler.
2224 */
2225static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2226{
2227 struct ath10k *ar = arg;
2228 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2229
2230 if (ar_pci->num_msi_intrs == 0) {
2231 if (!ath10k_pci_irq_pending(ar))
2232 return IRQ_NONE;
2233
2685218b 2234 ath10k_pci_disable_and_clear_legacy_irq(ar);
2235 }
2236
2237 tasklet_schedule(&ar_pci->intr_tq);
2238
2239 return IRQ_HANDLED;
2240}
2241
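/*
 * Early IRQ handling: until the HIF is started only a minimal tasklet is
 * installed. It checks the FW indicator for a crash, acks the event and
 * re-enables the legacy interrupt; full diagnostics are not available at
 * this point.
 */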
2242static void ath10k_pci_early_irq_tasklet(unsigned long data)
2243{
2244 struct ath10k *ar = (struct ath10k *)data;
2245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2246 u32 fw_ind;
2247 int ret;
2248
2249 ret = ath10k_pci_wake(ar);
2250 if (ret) {
2251 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2252 ret);
2253 return;
2254 }
2255
2256 fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2257 if (fw_ind & FW_IND_EVENT_PENDING) {
2258 ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2259 fw_ind & ~FW_IND_EVENT_PENDING);
2260
2261 /* Some structures are unavailable during early boot or at
2262 * driver teardown so just print that the device has crashed. */
2263 ath10k_warn("device crashed - no diagnostics available\n");
2264 }
2265
2266 ath10k_pci_sleep(ar);
2267 ath10k_pci_enable_legacy_irq(ar);
2268}
2269
2270static void ath10k_pci_tasklet(unsigned long data)
2271{
2272 struct ath10k *ar = (struct ath10k *)data;
2273 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2274
2275 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2276 ath10k_ce_per_engine_service_any(ar);
2277
2278 /* Re-enable legacy irq that was disabled in the irq handler */
2279 if (ar_pci->num_msi_intrs == 0)
2280 ath10k_pci_enable_legacy_irq(ar);
2281}
2282
fc15ca13 2283static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2284{
2285 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 2286 int ret, i;
2287
2288 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2289 ath10k_pci_msi_fw_handler,
2290 IRQF_SHARED, "ath10k_pci", ar);
591ecdb8 2291 if (ret) {
fc15ca13 2292 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
591ecdb8 2293 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
5e3dd157 2294 return ret;
591ecdb8 2295 }
2296
2297 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2298 ret = request_irq(ar_pci->pdev->irq + i,
2299 ath10k_pci_per_engine_handler,
2300 IRQF_SHARED, "ath10k_pci", ar);
2301 if (ret) {
fc15ca13 2302 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2303 ar_pci->pdev->irq + i, ret);
2304
2305 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2306 free_irq(ar_pci->pdev->irq + i, ar);
5e3dd157 2307
87b1423b 2308 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2309 return ret;
2310 }
2311 }
2312
2313 return 0;
2314}
2315
fc15ca13 2316static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2317{
2318 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2319 int ret;
2320
2321 ret = request_irq(ar_pci->pdev->irq,
2322 ath10k_pci_interrupt_handler,
2323 IRQF_SHARED, "ath10k_pci", ar);
2324 if (ret) {
2325 ath10k_warn("failed to request MSI irq %d: %d\n",
2326 ar_pci->pdev->irq, ret);
2327 return ret;
2328 }
2329
2330 return 0;
2331}
2332
fc15ca13 2333static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2334{
2335 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2336 int ret;
2337
2338 ret = request_irq(ar_pci->pdev->irq,
2339 ath10k_pci_interrupt_handler,
2340 IRQF_SHARED, "ath10k_pci", ar);
f3782744 2341 if (ret) {
2342 ath10k_warn("failed to request legacy irq %d: %d\n",
2343 ar_pci->pdev->irq, ret);
5e3dd157 2344 return ret;
f3782744 2345 }
5e3dd157 2346
2347 return 0;
2348}
2349
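/*
 * The irq registration strategy follows num_msi_intrs as chosen by
 * ath10k_pci_init_irq(): 0 means a shared legacy INTx handler, 1 a
 * single MSI, and MSI_NUM_REQUEST a block of per-CE vectors plus a
 * dedicated firmware-error vector.
 */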
2350static int ath10k_pci_request_irq(struct ath10k *ar)
2351{
2352 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2353
2354 switch (ar_pci->num_msi_intrs) {
2355 case 0:
2356 return ath10k_pci_request_irq_legacy(ar);
2357 case 1:
2358 return ath10k_pci_request_irq_msi(ar);
2359 case MSI_NUM_REQUEST:
2360 return ath10k_pci_request_irq_msix(ar);
2361 }
5e3dd157 2362
2363 ath10k_warn("unknown irq configuration upon request\n");
2364 return -EINVAL;
2365}
2366
2367static void ath10k_pci_free_irq(struct ath10k *ar)
2368{
2369 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2370 int i;
2371
2372 /* There's at least one interrupt regardless of whether it's legacy INTR,
2373 * MSI or MSI-X. */
2374 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2375 free_irq(ar_pci->pdev->irq + i, ar);
2376}
2377
2378static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2379{
2380 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2381 int i;
2382
fc15ca13 2383 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
5e3dd157 2384 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
fc15ca13 2385 (unsigned long)ar);
2386 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2387 (unsigned long)ar);
2388
2389 for (i = 0; i < CE_COUNT; i++) {
2390 ar_pci->pipe_info[i].ar_pci = ar_pci;
fc15ca13 2391 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2392 (unsigned long)&ar_pci->pipe_info[i]);
2393 }
2394}
2395
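/*
 * Interrupt mode selection, in order of preference: a block of
 * MSI_NUM_REQUEST MSI vectors when the device advertises MSI-X support,
 * then a single MSI, and finally shared legacy interrupts. The legacy
 * path additionally unmasks the firmware and CE sources in the SoC's
 * PCIE_INTR_ENABLE register.
 */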
2396static int ath10k_pci_init_irq(struct ath10k *ar)
2397{
2398 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2399 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2400 ar_pci->features);
fc15ca13 2401 int ret;
5e3dd157 2402
fc15ca13 2403 ath10k_pci_init_irq_tasklets(ar);
5e3dd157 2404
2405 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2406 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2407 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
5e3dd157 2408
fc15ca13 2409 /* Try MSI-X */
2410 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2411 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2412 ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
5e3dd157 2413 if (ret == 0)
2414 return 0;
2415 if (ret > 0)
2416 pci_disable_msi(ar_pci->pdev);
5e3dd157 2417
cfe9c45b 2418 /* fall-through */
2419 }
2420
fc15ca13 2421 /* Try MSI */
2422 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2423 ar_pci->num_msi_intrs = 1;
2424 ret = pci_enable_msi(ar_pci->pdev);
5e3dd157 2425 if (ret == 0)
cfe9c45b 2426 return 0;
5e3dd157 2427
cfe9c45b 2428 /* fall-through */
2429 }
2430
2431 /* Try legacy irq
2432 *
2433 * A potential race occurs here: The CORE_BASE write
2434 * depends on target correctly decoding AXI address but
2435 * host won't know when target writes BAR to CORE_CTRL.
2436 * This write might get lost if target has NOT written BAR.
2437 * For now, work around the race by repeating the write in the
2438 * synchronization check below. */
2439 ar_pci->num_msi_intrs = 0;
5e3dd157 2440
2441 ret = ath10k_pci_wake(ar);
2442 if (ret) {
2443 ath10k_warn("failed to wake target: %d\n", ret);
2444 return ret;
2445 }
2446
2447 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2448 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2449 ath10k_pci_sleep(ar);
2450
2451 return 0;
2452}
2453
fc15ca13 2454static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
5e3dd157 2455{
fc15ca13 2456 int ret;
5e3dd157 2457
fc15ca13 2458 ret = ath10k_pci_wake(ar);
f3782744 2459 if (ret) {
fc15ca13 2460 ath10k_warn("failed to wake target: %d\n", ret);
2461 return ret;
2462 }
5e3dd157 2463
2464 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2465 0);
2466 ath10k_pci_sleep(ar);
2467
2468 return 0;
2469}
2470
fc15ca13 2471static int ath10k_pci_deinit_irq(struct ath10k *ar)
2472{
2473 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2474
2475 switch (ar_pci->num_msi_intrs) {
2476 case 0:
2477 return ath10k_pci_deinit_irq_legacy(ar);
2478 case 1:
2479 /* fall-through */
2480 case MSI_NUM_REQUEST:
5e3dd157 2481 pci_disable_msi(ar_pci->pdev);
2482 return 0;
2483 }
2484
2485 ath10k_warn("unknown irq configuration upon deinit\n");
2486 return -EINVAL;
2487}
2488
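/*
 * Poll the FW indicator for FW_IND_INITIALIZED for up to ~3 seconds
 * (300 iterations of 10 ms). In legacy interrupt mode the interrupt
 * enable write is repeated on every iteration to cover the race noted
 * in ath10k_pci_init_irq().
 */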
d7fb47f5 2489static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2490{
2491 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2492 int wait_limit = 300; /* 3 sec */
f3782744 2493 int ret;
5e3dd157 2494
98563d5a 2495 ret = ath10k_pci_wake(ar);
f3782744 2496 if (ret) {
5b2589fc 2497 ath10k_err("failed to wake up target: %d\n", ret);
2498 return ret;
2499 }
2500
2501 while (wait_limit-- &&
2502 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2503 FW_IND_INITIALIZED)) {
2504 if (ar_pci->num_msi_intrs == 0)
2505 /* Fix potential race by repeating CORE_BASE writes */
2506 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2507 PCIE_INTR_CE_MASK_ALL,
2508 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2509 PCIE_INTR_ENABLE_ADDRESS));
2510 mdelay(10);
2511 }
2512
2513 if (wait_limit < 0) {
2514 ath10k_err("target stalled\n");
2515 ret = -EIO;
2516 goto out;
2517 }
2518
5b2589fc 2519out:
98563d5a 2520 ath10k_pci_sleep(ar);
5b2589fc 2521 return ret;
2522}
2523
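/*
 * Cold reset sketch: set bit 0 of SOC_GLOBAL_RESET to put the target
 * (including PCIe) into reset, poll RTC_STATE until COLD_RESET is
 * reported, then clear the bit and poll again for the target to come
 * back, each phase bounded by ATH_PCI_RESET_WAIT_MAX millisecond steps.
 */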
5b2589fc 2524static int ath10k_pci_device_reset(struct ath10k *ar)
5e3dd157 2525{
5b2589fc 2526 int i, ret;
2527 u32 val;
2528
2529 ret = ath10k_do_pci_wake(ar);
2530 if (ret) {
2531 ath10k_err("failed to wake up target: %d\n",
2532 ret);
2533 return ret;
2534 }
2535
2536 /* Put Target, including PCIe, into RESET. */
e479ed43 2537 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 2538 val |= 1;
e479ed43 2539 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2540
2541 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2542 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2543 RTC_STATE_COLD_RESET_MASK)
2544 break;
2545 msleep(1);
2546 }
2547
2548 /* Pull Target, including PCIe, out of RESET. */
2549 val &= ~1;
e479ed43 2550 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2551
2552 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2553 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2554 RTC_STATE_COLD_RESET_MASK))
2555 break;
2556 msleep(1);
2557 }
2558
2559 ath10k_do_pci_sleep(ar);
2560 return 0;
2561}
2562
2563static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2564{
2565 int i;
2566
2567 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2568 if (!test_bit(i, ar_pci->features))
2569 continue;
2570
2571 switch (i) {
2572 case ATH10K_PCI_FEATURE_MSI_X:
24cfade1 2573 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
5e3dd157 2574 break;
8cc8df90 2575 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
24cfade1 2576 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
8cc8df90 2577 break;
2578 }
2579 }
2580}
2581
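/*
 * Probe outline: allocate the per-device ath10k_pci state, derive
 * feature flags from the PCI device ID, create the driver core, enable
 * and map BAR 0, restrict DMA to 32 bits, read SOC_CHIP_ID while the
 * target is awake and finally register with the core.
 */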
2582static int ath10k_pci_probe(struct pci_dev *pdev,
2583 const struct pci_device_id *pci_dev)
2584{
2585 void __iomem *mem;
2586 int ret = 0;
2587 struct ath10k *ar;
2588 struct ath10k_pci *ar_pci;
e01ae68c 2589 u32 lcr_val, chip_id;
2590
2591 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2592
2593 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2594 if (ar_pci == NULL)
2595 return -ENOMEM;
2596
2597 ar_pci->pdev = pdev;
2598 ar_pci->dev = &pdev->dev;
2599
2600 switch (pci_dev->device) {
2601 case QCA988X_2_0_DEVICE_ID:
2602 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2603 break;
2604 default:
2605 ret = -ENODEV;
6d3be300 2606 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2607 goto err_ar_pci;
2608 }
2609
2610 if (ath10k_target_ps)
2611 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2612
2613 ath10k_pci_dump_features(ar_pci);
2614
3a0861ff 2615 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
5e3dd157 2616 if (!ar) {
1d2b48d6 2617 ath10k_err("failed to create driver core\n");
2618 ret = -EINVAL;
2619 goto err_ar_pci;
2620 }
2621
2622 ar_pci->ar = ar;
2623 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2624 atomic_set(&ar_pci->keep_awake_count, 0);
2625
2626 pci_set_drvdata(pdev, ar);
2627
2628 /*
2629 * Without the Host's knowledge, the Target may have been reset or
2630 * power cycled and its Config Space may no longer reflect the PCI
2631 * address space that was assigned earlier by the PCI infrastructure.
2632 * Refresh it now.
2633 */
2634 ret = pci_assign_resource(pdev, BAR_NUM);
2635 if (ret) {
1d2b48d6 2636 ath10k_err("failed to assign PCI space: %d\n", ret);
2637 goto err_ar;
2638 }
2639
2640 ret = pci_enable_device(pdev);
2641 if (ret) {
1d2b48d6 2642 ath10k_err("failed to enable PCI device: %d\n", ret);
2643 goto err_ar;
2644 }
2645
2646 /* Request MMIO resources */
2647 ret = pci_request_region(pdev, BAR_NUM, "ath");
2648 if (ret) {
1d2b48d6 2649 ath10k_err("failed to request MMIO region: %d\n", ret);
2650 goto err_device;
2651 }
2652
2653 /*
2654 * Target structures have a limit of 32 bit DMA pointers.
2655 * DMA pointers can be wider than 32 bits by default on some systems.
2656 */
2657 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2658 if (ret) {
1d2b48d6 2659 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2660 goto err_region;
2661 }
2662
2663 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2664 if (ret) {
1d2b48d6 2665 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2666 goto err_region;
2667 }
2668
2669 /* Set bus master bit in PCI_COMMAND to enable DMA */
2670 pci_set_master(pdev);
2671
2672 /*
2673 * Temporary FIX: disable ASPM.
2674 * Will be removed after the OTP is programmed.
2675 */
2676 pci_read_config_dword(pdev, 0x80, &lcr_val);
2677 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2678
2679 /* Arrange for access to Target SoC registers. */
2680 mem = pci_iomap(pdev, BAR_NUM, 0);
2681 if (!mem) {
1d2b48d6 2682 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2683 ret = -EIO;
2684 goto err_master;
2685 }
2686
2687 ar_pci->mem = mem;
2688
2689 spin_lock_init(&ar_pci->ce_lock);
2690
2691 ret = ath10k_do_pci_wake(ar);
2692 if (ret) {
2693 ath10k_err("Failed to get chip id: %d\n", ret);
12eb0879 2694 goto err_iomap;
2695 }
2696
233eb97f 2697 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2698
2699 ath10k_do_pci_sleep(ar);
2700
2701 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2702
e01ae68c 2703 ret = ath10k_core_register(ar, chip_id);
5e3dd157 2704 if (ret) {
1d2b48d6 2705 ath10k_err("failed to register driver core: %d\n", ret);
32270b61 2706 goto err_iomap;
2707 }
2708
2709 return 0;
2710
2711err_iomap:
2712 pci_iounmap(pdev, mem);
2713err_master:
2714 pci_clear_master(pdev);
2715err_region:
2716 pci_release_region(pdev, BAR_NUM);
2717err_device:
2718 pci_disable_device(pdev);
2719err_ar:
2720 ath10k_core_destroy(ar);
2721err_ar_pci:
2722 /* call HIF PCI free here */
2723 kfree(ar_pci);
2724
2725 return ret;
2726}
2727
2728static void ath10k_pci_remove(struct pci_dev *pdev)
2729{
2730 struct ath10k *ar = pci_get_drvdata(pdev);
2731 struct ath10k_pci *ar_pci;
2732
2733 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2734
2735 if (!ar)
2736 return;
2737
2738 ar_pci = ath10k_pci_priv(ar);
2739
2740 if (!ar_pci)
2741 return;
2742
2743 tasklet_kill(&ar_pci->msi_fw_err);
2744
2745 ath10k_core_unregister(ar);
5e3dd157 2746
2747 pci_iounmap(pdev, ar_pci->mem);
2748 pci_release_region(pdev, BAR_NUM);
2749 pci_clear_master(pdev);
2750 pci_disable_device(pdev);
2751
2752 ath10k_core_destroy(ar);
2753 kfree(ar_pci);
2754}
2755
2756MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2757
2758static struct pci_driver ath10k_pci_driver = {
2759 .name = "ath10k_pci",
2760 .id_table = ath10k_pci_id_table,
2761 .probe = ath10k_pci_probe,
2762 .remove = ath10k_pci_remove,
2763};
2764
2765static int __init ath10k_pci_init(void)
2766{
2767 int ret;
2768
2769 ret = pci_register_driver(&ath10k_pci_driver);
2770 if (ret)
1d2b48d6 2771 ath10k_err("failed to register PCI driver: %d\n", ret);
2772
2773 return ret;
2774}
2775module_init(ath10k_pci_init);
2776
2777static void __exit ath10k_pci_exit(void)
2778{
2779 pci_unregister_driver(&ath10k_pci_driver);
2780}
2781
2782module_exit(ath10k_pci_exit);
2783
2784MODULE_AUTHOR("Qualcomm Atheros");
2785MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2786MODULE_LICENSE("Dual BSD/GPL");
2787MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2788MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2789MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);