ath10k: print chip id during boot
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

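/* Editor's usage note (illustration, not part of the original source):
 * being module parameters with 0644 permissions, these knobs can be set at
 * load time, e.g.
 *
 *	modprobe ath10k_pci irq_mode=2 reset_mode=0
 *
 * or read/written afterwards via /sys/module/ath10k_pci/parameters/.
 */
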
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

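/* Editor's note (orientation, inferred from how the table is used below):
 * in a host-side ce_attr, src_nentries sizes the host->target send ring and
 * dest_nentries the target->host receive ring, so an entry with
 * src_nentries == 0 (e.g. CE1/CE2 above) is receive-only from the host's
 * point of view.
 */
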
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
	 * interrupt from irq vector is triggered in all cases for FW
	 * indication/errors */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}

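/* Editor's summary (inferred from the call sites below): the "early" irq
 * handler is installed across power_up()/power_down() so firmware error
 * indications are caught before the full per-CE handlers exist;
 * hif_start() swaps it out for the regular irq setup and hif_stop()
 * restores it.
 */
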
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_warn("failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

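/* Illustrative call (editor's sketch, assuming a DRAM-resident address and
 * an initialised struct ath10k *ar):
 *
 *	u32 val;
 *	int ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS + 0x1000,
 *					   &val, sizeof(val));
 *	if (ret)
 *		ath10k_warn("diag read failed: %d\n", ret);
 */
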
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_warn("failed to write diag value at 0x%x: %d\n",
			    address, ret);

	return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

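/* Editor's note: ath10k_do_pci_wake()/ath10k_do_pci_sleep() form a
 * refcounted pair around keep_awake_count; only the 0->1 transition forces
 * AWAKE and only the 1->0 transition allows sleep, so nested callers are
 * safe as long as every wake is balanced by exactly one sleep.
 */
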
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;
	int err;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
		if (unlikely(err)) {
			/* FIXME: retry */
			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
				    pipe_info->pipe_num, err);
		}

		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn("rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	int err, i;

	spin_lock_bh(&ar_pci->ce_lock);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto unlock;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto unlock;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto unlock;

	err = 0;
unlock:
	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

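/* Editor's note: in the scatter-gather send above, all but the last item
 * are queued with CE_SEND_FLAG_GATHER so the CE treats them as
 * continuation fragments of a single transfer; only the final, flagless
 * send actually completes (and triggers) the transmission.
 */
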
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    pipe_info->pipe_num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    pipe_info->pipe_num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

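/* Editor's note (assumption): ath10k_pci_post_rx() below fills each RX ring
 * with dest_nentries - 1 buffers, leaving one slot unused; this is the usual
 * ring-buffer trick that lets a completely full ring be distinguished from a
 * completely empty one by the read/write indices alone.
 */
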
static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *   - buffers that were enqueued for receive
 *   - buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_warm_reset(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

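/* Editor's summary: ath10k_pci_hif_exchange_bmi_msg() above bounces the
 * request/response through DMA-mapped copies (treq/tresp), pre-posts the
 * response buffer on the RX CE before sending, and then busy-polls both
 * pipes via ath10k_pci_bmi_wait() below, since BMI runs during probe,
 * before the full CE interrupt handlers are requested.
 */
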
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (completion_done(&xfer->done))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

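/* Editor's summary: ath10k_pci_init_config() above pushes the target-side
 * CE pipe table and service->pipe map to the device through the diagnostic
 * window, disables PCIe L1, reserves one IRAM bank for early allocation,
 * and finally sets HI_OPTION_EARLY_CFG_DONE so target firmware may proceed
 * with its own initialisation.
 */
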
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	return 0;
}

static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret = 0;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	/* reset CE */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* unreset CE */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

	msleep(100);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

	ath10k_do_pci_sleep(ar);
	return ret;
}

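/* Editor's note: the warm reset above resets the CE block and the target
 * CPU separately, with settle delays in between, while leaving the PCIe
 * core alive; that is what makes it preferable to a full cold reset on
 * CUS232 v2 boards (see the comment in ath10k_pci_hif_power_up() below).
 */
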
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
			    irq_mode, ath10k_pci_irq_mode,
			    ath10k_pci_reset_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

fc36e3ff
MK
1978static int ath10k_pci_hif_power_up(struct ath10k *ar)
1979{
1980 int ret;
1981
50f87a67
KV
1982 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
1983
fc36e3ff
MK
1984 /*
1985 * Hardware CUS232 version 2 has some issues with cold reset and the
1986 * preferred (and safer) way to perform a device reset is through a
1987 * warm reset.
1988 *
1989 * Warm reset doesn't always work though (notably after a firmware
1990 * crash) so fall back to cold reset if necessary.
1991 */
1992 ret = __ath10k_pci_hif_power_up(ar, false);
1993 if (ret) {
35098463 1994 ath10k_warn("failed to power up target using warm reset: %d\n",
fc36e3ff
MK
1995 ret);
1996
35098463
KV
1997 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1998 return ret;
1999
2000 ath10k_warn("trying cold reset\n");
2001
fc36e3ff
MK
2002 ret = __ath10k_pci_hif_power_up(ar, true);
2003 if (ret) {
2004 ath10k_err("failed to power up target using cold reset too (%d)\n",
2005 ret);
2006 return ret;
2007 }
2008 }
2009
2010 return 0;
2011}
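Taken together, the two power-up functions implement a simple policy: always try the safer warm reset first, and escalate to a full cold reset only when the warm reset fails and ath10k_pci_reset_mode permits it. As a comment-style summary (illustrative, not driver code):

/*
 * warm reset succeeds                        -> done
 * warm reset fails, reset mode == WARM_ONLY  -> return the error
 * warm reset fails, reset mode == AUTO       -> one cold reset attempt,
 *                                               then give up
 */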
2012
8c5c5368
MK
2013static void ath10k_pci_hif_power_down(struct ath10k *ar)
2014{
8cc8df90
BM
2015 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2016
50f87a67
KV
2017 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2018
ab977bd0
MK
2019 ath10k_pci_free_early_irq(ar);
2020 ath10k_pci_kill_tasklet(ar);
fc15ca13 2021 ath10k_pci_deinit_irq(ar);
fc36e3ff 2022 ath10k_pci_warm_reset(ar);
8cc8df90 2023
8c5c5368 2024 ath10k_pci_ce_deinit(ar);
8cc8df90 2025 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
8c5c5368
MK
2026 ath10k_do_pci_sleep(ar);
2027}
2028
8cd13cad
MK
2029#ifdef CONFIG_PM
2030
2031#define ATH10K_PCI_PM_CONTROL 0x44
2032
2033static int ath10k_pci_hif_suspend(struct ath10k *ar)
2034{
2035 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2036 struct pci_dev *pdev = ar_pci->pdev;
2037 u32 val;
2038
2039 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2040
2041 if ((val & 0x000000ff) != 0x3) {
2042 pci_save_state(pdev);
2043 pci_disable_device(pdev);
2044 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2045 (val & 0xffffff00) | 0x03);
2046 }
2047
2048 return 0;
2049}
2050
2051static int ath10k_pci_hif_resume(struct ath10k *ar)
2052{
2053 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2054 struct pci_dev *pdev = ar_pci->pdev;
2055 u32 val;
2056
2057 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2058
2059 if ((val & 0x000000ff) != 0) {
2060 pci_restore_state(pdev);
2061 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2062 val & 0xffffff00);
2063 /*
2064 * Suspend/Resume resets the PCI configuration space,
2065 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2066 * to keep PCI Tx retries from interfering with C3 CPU state
2067 */
2068 pci_read_config_dword(pdev, 0x40, &val);
2069
2070 if ((val & 0x0000ff00) != 0)
2071 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2072 }
2073
2074 return 0;
2075}
2076#endif
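Both PM handlers poke the power-management control/status register at config offset 0x44 (ATH10K_PCI_PM_CONTROL). In the PCI PM spec the low two bits of that register select the device power state, which is why suspend writes 0x03 (D3hot) into the low byte and resume clears it back to 0 (D0). A small illustrative decoder for that field, not part of the driver:

/* Illustrative only: name the PowerState field (bits [1:0]) of a value
 * read from the PM control/status register. */
static const char *ath10k_pci_pm_state_name(u32 pmcsr)
{
	switch (pmcsr & 0x3) {
	case 0:
		return "D0";
	case 1:
		return "D1";
	case 2:
		return "D2";
	default:
		return "D3hot";
	}
}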
2077
5e3dd157 2078static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
726346fc 2079 .tx_sg = ath10k_pci_hif_tx_sg,
5e3dd157
KV
2080 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2081 .start = ath10k_pci_hif_start,
2082 .stop = ath10k_pci_hif_stop,
2083 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2084 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2085 .send_complete_check = ath10k_pci_hif_send_complete_check,
e799bbff 2086 .set_callbacks = ath10k_pci_hif_set_callbacks,
5e3dd157 2087 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
8c5c5368
MK
2088 .power_up = ath10k_pci_hif_power_up,
2089 .power_down = ath10k_pci_hif_power_down,
8cd13cad
MK
2090#ifdef CONFIG_PM
2091 .suspend = ath10k_pci_hif_suspend,
2092 .resume = ath10k_pci_hif_resume,
2093#endif
5e3dd157
KV
2094};
2095
2096static void ath10k_pci_ce_tasklet(unsigned long ptr)
2097{
87263e5b 2098 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
5e3dd157
KV
2099 struct ath10k_pci *ar_pci = pipe->ar_pci;
2100
2101 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2102}
2103
2104static void ath10k_msi_err_tasklet(unsigned long data)
2105{
2106 struct ath10k *ar = (struct ath10k *)data;
2107
2108 ath10k_pci_fw_interrupt_handler(ar);
2109}
2110
2111/*
2112 * Handler for a per-engine interrupt on a PARTICULAR CE.
2113 * This is used in cases where each CE has a private MSI interrupt.
2114 */
2115static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2116{
2117 struct ath10k *ar = arg;
2118 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2119 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2120
e5742672 2121 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
5e3dd157
KV
2122 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2123 return IRQ_HANDLED;
2124 }
2125
2126 /*
2127 * NOTE: We are able to derive ce_id from irq because we
2128 * use a one-to-one mapping for CEs 0..5.
2129 * CEs 6 & 7 do not use interrupts at all.
2130 *
2131 * This mapping must be kept in sync with the mapping
2132 * used by firmware.
2133 */
2134 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2135 return IRQ_HANDLED;
2136}
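Assuming the vector layout implied by the arithmetic above (one dedicated firmware vector followed by one vector per interrupt-driven CE), the MSI block can be pictured like this; the constants are the driver's own, the layout comment is illustrative:

/*
 * pdev->irq + MSI_ASSIGN_FW             -> firmware/error interrupt
 * pdev->irq + MSI_ASSIGN_CE_INITIAL     -> CE 0
 * pdev->irq + MSI_ASSIGN_CE_INITIAL + n -> CE n (up to MSI_ASSIGN_CE_MAX)
 *
 * hence ce_id = irq - pdev->irq - MSI_ASSIGN_CE_INITIAL above.
 */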
2137
2138static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2139{
2140 struct ath10k *ar = arg;
2141 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2142
2143 tasklet_schedule(&ar_pci->msi_fw_err);
2144 return IRQ_HANDLED;
2145}
2146
2147/*
2148 * Top-level interrupt handler for all PCI interrupts from a Target.
2149 * When a block of MSI interrupts is allocated, this top-level handler
2150 * is not used; instead, we directly call the correct sub-handler.
2151 */
2152static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2153{
2154 struct ath10k *ar = arg;
2155 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2156
2157 if (ar_pci->num_msi_intrs == 0) {
e539887b
MK
2158 if (!ath10k_pci_irq_pending(ar))
2159 return IRQ_NONE;
2160
2685218b 2161 ath10k_pci_disable_and_clear_legacy_irq(ar);
5e3dd157
KV
2162 }
2163
2164 tasklet_schedule(&ar_pci->intr_tq);
2165
2166 return IRQ_HANDLED;
2167}
2168
ab977bd0
MK
2169static void ath10k_pci_early_irq_tasklet(unsigned long data)
2170{
2171 struct ath10k *ar = (struct ath10k *)data;
ab977bd0
MK
2172 u32 fw_ind;
2173 int ret;
2174
2175 ret = ath10k_pci_wake(ar);
2176 if (ret) {
2177 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2178 ret);
2179 return;
2180 }
2181
b39712ce 2182 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
ab977bd0 2183 if (fw_ind & FW_IND_EVENT_PENDING) {
b39712ce 2184 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
ab977bd0
MK
2185 fw_ind & ~FW_IND_EVENT_PENDING);
2186
2187 /* Some structures are unavailable during early boot or at
2188 * driver teardown, so just print that the device has crashed. */
2189 ath10k_warn("device crashed - no diagnostics available\n");
2190 }
2191
2192 ath10k_pci_sleep(ar);
2193 ath10k_pci_enable_legacy_irq(ar);
2194}
2195
5e3dd157
KV
2196static void ath10k_pci_tasklet(unsigned long data)
2197{
2198 struct ath10k *ar = (struct ath10k *)data;
2199 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2200
2201 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2202 ath10k_ce_per_engine_service_any(ar);
2203
2685218b
MK
2204 /* Re-enable legacy irq that was disabled in the irq handler */
2205 if (ar_pci->num_msi_intrs == 0)
2206 ath10k_pci_enable_legacy_irq(ar);
5e3dd157
KV
2207}
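With a shared legacy INTx line the driver follows the usual top/bottom-half split: the hard irq handler only confirms the device really asserted the line, masks and clears it, and defers the work; the tasklet then services the firmware indicator and all copy engines before unmasking. Sketched as a sequence (illustrative comment):

/*
 * hardirq: ath10k_pci_irq_pending()?  no  -> IRQ_NONE (line is shared)
 *                                     yes -> disable + clear legacy irq,
 *                                            tasklet_schedule(&intr_tq)
 * tasklet: handle fw interrupt, service all CEs, then
 *          ath10k_pci_enable_legacy_irq()
 */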
2208
fc15ca13 2209static int ath10k_pci_request_irq_msix(struct ath10k *ar)
5e3dd157
KV
2210{
2211 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 2212 int ret, i;
5e3dd157
KV
2213
2214 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2215 ath10k_pci_msi_fw_handler,
2216 IRQF_SHARED, "ath10k_pci", ar);
591ecdb8 2217 if (ret) {
fc15ca13 2218 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
591ecdb8 2219 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
5e3dd157 2220 return ret;
591ecdb8 2221 }
5e3dd157
KV
2222
2223 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2224 ret = request_irq(ar_pci->pdev->irq + i,
2225 ath10k_pci_per_engine_handler,
2226 IRQF_SHARED, "ath10k_pci", ar);
2227 if (ret) {
fc15ca13 2228 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
5e3dd157
KV
2229 ar_pci->pdev->irq + i, ret);
2230
87b1423b
MK
2231 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2232 free_irq(ar_pci->pdev->irq + i, ar);
5e3dd157 2233
87b1423b 2234 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
5e3dd157
KV
2235 return ret;
2236 }
2237 }
2238
5e3dd157
KV
2239 return 0;
2240}
2241
fc15ca13 2242static int ath10k_pci_request_irq_msi(struct ath10k *ar)
5e3dd157
KV
2243{
2244 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2245 int ret;
2246
5e3dd157
KV
2247 ret = request_irq(ar_pci->pdev->irq,
2248 ath10k_pci_interrupt_handler,
2249 IRQF_SHARED, "ath10k_pci", ar);
fc15ca13
MK
2250 if (ret) {
2251 ath10k_warn("failed to request MSI irq %d: %d\n",
2252 ar_pci->pdev->irq, ret);
5e3dd157
KV
2253 return ret;
2254 }
2255
5e3dd157
KV
2256 return 0;
2257}
2258
fc15ca13 2259static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
5e3dd157
KV
2260{
2261 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2262 int ret;
2263
2264 ret = request_irq(ar_pci->pdev->irq,
2265 ath10k_pci_interrupt_handler,
2266 IRQF_SHARED, "ath10k_pci", ar);
f3782744 2267 if (ret) {
fc15ca13
MK
2268 ath10k_warn("failed to request legacy irq %d: %d\n",
2269 ar_pci->pdev->irq, ret);
5e3dd157 2270 return ret;
f3782744 2271 }
5e3dd157 2272
5e3dd157
KV
2273 return 0;
2274}
2275
fc15ca13
MK
2276static int ath10k_pci_request_irq(struct ath10k *ar)
2277{
2278 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2279
fc15ca13
MK
2280 switch (ar_pci->num_msi_intrs) {
2281 case 0:
2282 return ath10k_pci_request_irq_legacy(ar);
2283 case 1:
2284 return ath10k_pci_request_irq_msi(ar);
2285 case MSI_NUM_REQUEST:
2286 return ath10k_pci_request_irq_msix(ar);
2287 }
5e3dd157 2288
fc15ca13
MK
2289 ath10k_warn("unknown irq configuration upon request\n");
2290 return -EINVAL;
5e3dd157
KV
2291}
2292
fc15ca13
MK
2293static void ath10k_pci_free_irq(struct ath10k *ar)
2294{
2295 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2296 int i;
2297
2298 /* There's at least one interrupt regardless of whether it's legacy
2299 * INTR, MSI, or MSI-X */
2300 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2301 free_irq(ar_pci->pdev->irq + i, ar);
2302}
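The max(1, ar_pci->num_msi_intrs) bound lets this single loop cover all three interrupt modes, because num_msi_intrs doubles as the mode marker (compare the switch in ath10k_pci_request_irq()):

/*
 * num_msi_intrs == 0               -> legacy INTx: 1 handler on pdev->irq
 * num_msi_intrs == 1               -> MSI:         1 handler on pdev->irq
 * num_msi_intrs == MSI_NUM_REQUEST -> MSI-X:       handlers on pdev->irq + i
 */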
2303
2304static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
5e3dd157
KV
2305{
2306 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157
KV
2307 int i;
2308
fc15ca13 2309 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
5e3dd157 2310 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
fc15ca13 2311 (unsigned long)ar);
ab977bd0
MK
2312 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2313 (unsigned long)ar);
5e3dd157
KV
2314
2315 for (i = 0; i < CE_COUNT; i++) {
2316 ar_pci->pipe_info[i].ar_pci = ar_pci;
fc15ca13 2317 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
5e3dd157
KV
2318 (unsigned long)&ar_pci->pipe_info[i]);
2319 }
fc15ca13
MK
2320}
2321
2322static int ath10k_pci_init_irq(struct ath10k *ar)
2323{
2324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
cfe9c45b
MK
2325 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2326 ar_pci->features);
fc15ca13 2327 int ret;
5e3dd157 2328
fc15ca13 2329 ath10k_pci_init_irq_tasklets(ar);
5e3dd157 2330
cfe9c45b
MK
2331 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2332 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2333 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
5e3dd157 2334
fc15ca13 2335 /* Try MSI-X */
cfe9c45b
MK
2336 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2337 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
5ad6867c
AG
2338 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2339 ar_pci->num_msi_intrs);
2340 if (ret > 0)
cfe9c45b 2341 return 0;
5e3dd157 2342
cfe9c45b 2343 /* fall-through */
5e3dd157
KV
2344 }
2345
fc15ca13 2346 /* Try MSI */
cfe9c45b
MK
2347 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2348 ar_pci->num_msi_intrs = 1;
2349 ret = pci_enable_msi(ar_pci->pdev);
5e3dd157 2350 if (ret == 0)
cfe9c45b 2351 return 0;
5e3dd157 2352
cfe9c45b 2353 /* fall-through */
5e3dd157
KV
2354 }
2355
fc15ca13
MK
2356 /* Try legacy irq
2357 *
2358 * A potential race occurs here: the CORE_BASE write
2359 * depends on the target correctly decoding the AXI address, but
2360 * the host won't know when the target has written its BAR to CORE_CTRL.
2361 * This write might get lost if the target has NOT written the BAR yet.
2362 * For now, fix the race by repeating the write in the
2363 * synchronization check below. */
2364 ar_pci->num_msi_intrs = 0;
5e3dd157 2365
fc15ca13
MK
2366 ret = ath10k_pci_wake(ar);
2367 if (ret) {
2368 ath10k_warn("failed to wake target: %d\n", ret);
2369 return ret;
5e3dd157
KV
2370 }
2371
fc15ca13
MK
2372 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2373 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2374 ath10k_pci_sleep(ar);
2375
2376 return 0;
5e3dd157
KV
2377}
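If the automatic selection above misbehaves on a particular platform, the choice can be pinned at module load time through the ath10k_pci_irq_mode parameter (ATH10K_PCI_IRQ_AUTO, ATH10K_PCI_IRQ_LEGACY or ATH10K_PCI_IRQ_MSI); for example, loading the module with the legacy value forces the INTx path even when MSI is available.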
2378
fc15ca13 2379static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
5e3dd157 2380{
fc15ca13 2381 int ret;
5e3dd157 2382
fc15ca13 2383 ret = ath10k_pci_wake(ar);
f3782744 2384 if (ret) {
fc15ca13 2385 ath10k_warn("failed to wake target: %d\n", ret);
f3782744
KV
2386 return ret;
2387 }
5e3dd157 2388
fc15ca13
MK
2389 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2390 0);
2391 ath10k_pci_sleep(ar);
2392
2393 return 0;
5e3dd157
KV
2394}
2395
fc15ca13 2396static int ath10k_pci_deinit_irq(struct ath10k *ar)
5e3dd157
KV
2397{
2398 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2399
fc15ca13
MK
2400 switch (ar_pci->num_msi_intrs) {
2401 case 0:
2402 return ath10k_pci_deinit_irq_legacy(ar);
2403 case 1:
2404 /* fall-through */
2405 case MSI_NUM_REQUEST:
5e3dd157 2406 pci_disable_msi(ar_pci->pdev);
fc15ca13 2407 return 0;
bb8b621a
AG
2408 default:
2409 pci_disable_msi(ar_pci->pdev);
fc15ca13
MK
2410 }
2411
2412 ath10k_warn("unknown irq configuration upon deinit\n");
2413 return -EINVAL;
5e3dd157
KV
2414}
2415
d7fb47f5 2416static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
5e3dd157
KV
2417{
2418 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0399eca8 2419 unsigned long timeout;
f3782744 2420 int ret;
0399eca8 2421 u32 val;
5e3dd157 2422
50f87a67
KV
2423 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2424
98563d5a 2425 ret = ath10k_pci_wake(ar);
f3782744 2426 if (ret) {
0399eca8 2427 ath10k_err("failed to wake up target for init: %d\n", ret);
f3782744
KV
2428 return ret;
2429 }
5e3dd157 2430
0399eca8
KV
2431 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2432
2433 do {
2434 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2435
50f87a67
KV
2436 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2437
0399eca8
KV
2438 /* target should never return this */
2439 if (val == 0xffffffff)
2440 continue;
2441
2442 if (val & FW_IND_INITIALIZED)
2443 break;
2444
5e3dd157
KV
2445 if (ar_pci->num_msi_intrs == 0)
2446 /* Fix potential race by repeating CORE_BASE writes */
0399eca8
KV
2447 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2448 PCIE_INTR_FIRMWARE_MASK |
2449 PCIE_INTR_CE_MASK_ALL);
2450
5e3dd157 2451 mdelay(10);
0399eca8 2452 } while (time_before(jiffies, timeout));
5e3dd157 2453
0399eca8
KV
2454 if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
2455 ath10k_err("failed to receive initialized event from target: %08x\n",
2456 val);
2457 ret = -ETIMEDOUT;
5b2589fc 2458 goto out;
5e3dd157
KV
2459 }
2460
50f87a67
KV
2461 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2462
5b2589fc 2463out:
98563d5a 2464 ath10k_pci_sleep(ar);
5b2589fc 2465 return ret;
5e3dd157
KV
2466}
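Two details above are worth noting: a read of 0xffffffff from a memory-mapped PCI register almost always means the read itself failed because the device stopped responding on the bus, not that the register really holds that value, hence the loop keeps waiting instead of trusting it; and the loop is the stock jiffies-bounded poll. A stripped-down sketch of that pattern, with a hypothetical read_fw_indicator() stub in place of the real MMIO read (kernel context assumed):

static u32 read_fw_indicator(void)
{
	return 0xffffffff;	/* hypothetical stub for the MMIO read */
}

static int example_wait_for_init(void)
{
	unsigned long timeout = jiffies +
				msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
	u32 val;

	do {
		val = read_fw_indicator();
		if (val != 0xffffffff && (val & FW_IND_INITIALIZED))
			return 0;	/* firmware finished init */
		mdelay(10);		/* poll every 10 ms */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;		/* target never came up */
}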
2467
fc36e3ff 2468static int ath10k_pci_cold_reset(struct ath10k *ar)
5e3dd157 2469{
5b2589fc 2470 int i, ret;
5e3dd157
KV
2471 u32 val;
2472
50f87a67
KV
2473 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2474
5b2589fc
MK
2475 ret = ath10k_do_pci_wake(ar);
2476 if (ret) {
2477 ath10k_err("failed to wake up target: %d\n",
2478 ret);
2479 return ret;
5e3dd157
KV
2480 }
2481
2482 /* Put Target, including PCIe, into RESET. */
e479ed43 2483 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 2484 val |= 1;
e479ed43 2485 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2486
2487 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2488 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2489 RTC_STATE_COLD_RESET_MASK)
2490 break;
2491 msleep(1);
2492 }
2493
2494 /* Pull Target, including PCIe, out of RESET. */
2495 val &= ~1;
e479ed43 2496 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2497
2498 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2499 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2500 RTC_STATE_COLD_RESET_MASK))
2501 break;
2502 msleep(1);
2503 }
2504
5b2589fc 2505 ath10k_do_pci_sleep(ar);
50f87a67
KV
2506
2507 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2508
5b2589fc 2509 return 0;
5e3dd157
KV
2510}
2511
2512static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2513{
2514 int i;
2515
2516 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2517 if (!test_bit(i, ar_pci->features))
2518 continue;
2519
2520 switch (i) {
2521 case ATH10K_PCI_FEATURE_MSI_X:
24cfade1 2522 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
5e3dd157 2523 break;
8cc8df90 2524 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
24cfade1 2525 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
8cc8df90 2526 break;
5e3dd157
KV
2527 }
2528 }
2529}
2530
2531static int ath10k_pci_probe(struct pci_dev *pdev,
2532 const struct pci_device_id *pci_dev)
2533{
2534 void __iomem *mem;
2535 int ret = 0;
2536 struct ath10k *ar;
2537 struct ath10k_pci *ar_pci;
e01ae68c 2538 u32 lcr_val, chip_id;
5e3dd157 2539
50f87a67 2540 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
5e3dd157
KV
2541
2542 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2543 if (ar_pci == NULL)
2544 return -ENOMEM;
2545
2546 ar_pci->pdev = pdev;
2547 ar_pci->dev = &pdev->dev;
2548
2549 switch (pci_dev->device) {
5e3dd157
KV
2550 case QCA988X_2_0_DEVICE_ID:
2551 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2552 break;
2553 default:
2554 ret = -ENODEV;
6d3be300 2555 ath10k_err("unknown device id: %d\n", pci_dev->device);
5e3dd157
KV
2556 goto err_ar_pci;
2557 }
2558
e42c1fbd 2559 if (ath10k_pci_target_ps)
8cc8df90
BM
2560 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2561
5e3dd157
KV
2562 ath10k_pci_dump_features(ar_pci);
2563
3a0861ff 2564 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
5e3dd157 2565 if (!ar) {
1d2b48d6 2566 ath10k_err("failed to create driver core\n");
5e3dd157
KV
2567 ret = -EINVAL;
2568 goto err_ar_pci;
2569 }
2570
5e3dd157 2571 ar_pci->ar = ar;
5e3dd157
KV
2572 atomic_set(&ar_pci->keep_awake_count, 0);
2573
2574 pci_set_drvdata(pdev, ar);
2575
2576 /*
2577 * Without any knowledge of the Host, the Target may have been reset or
2578 * power cycled and its Config Space may no longer reflect the PCI
2579 * address space that was assigned earlier by the PCI infrastructure.
2580 * Refresh it now.
2581 */
2582 ret = pci_assign_resource(pdev, BAR_NUM);
2583 if (ret) {
1d2b48d6 2584 ath10k_err("failed to assign PCI space: %d\n", ret);
5e3dd157
KV
2585 goto err_ar;
2586 }
2587
2588 ret = pci_enable_device(pdev);
2589 if (ret) {
1d2b48d6 2590 ath10k_err("failed to enable PCI device: %d\n", ret);
5e3dd157
KV
2591 goto err_ar;
2592 }
2593
2594 /* Request MMIO resources */
2595 ret = pci_request_region(pdev, BAR_NUM, "ath");
2596 if (ret) {
1d2b48d6 2597 ath10k_err("failed to request MMIO region: %d\n", ret);
5e3dd157
KV
2598 goto err_device;
2599 }
2600
2601 /*
2602 * Target structures have a limit of 32 bit DMA pointers.
2603 * DMA pointers can be wider than 32 bits by default on some systems.
2604 */
2605 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2606 if (ret) {
1d2b48d6 2607 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
5e3dd157
KV
2608 goto err_region;
2609 }
2610
2611 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2612 if (ret) {
1d2b48d6 2613 ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
5e3dd157
KV
2614 goto err_region;
2615 }
2616
2617 /* Set bus master bit in PCI_COMMAND to enable DMA */
2618 pci_set_master(pdev);
2619
2620 /*
2621 * Temporary FIX: disable ASPM
2622 * Will be removed after the OTP is programmed
2623 */
2624 pci_read_config_dword(pdev, 0x80, &lcr_val);
2625 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
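	/* 0x80 is where this device exposes its PCIe Link Control register
	 * (hence lcr_val); clearing the low byte clears, among other bits,
	 * the ASPM L0s/L1 enable field in bits [1:0]. */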
2626
2627 /* Arrange for access to Target SoC registers. */
2628 mem = pci_iomap(pdev, BAR_NUM, 0);
2629 if (!mem) {
1d2b48d6 2630 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
5e3dd157
KV
2631 ret = -EIO;
2632 goto err_master;
2633 }
2634
2635 ar_pci->mem = mem;
2636
2637 spin_lock_init(&ar_pci->ce_lock);
2638
e01ae68c
KV
2639 ret = ath10k_do_pci_wake(ar);
2640 if (ret) {
2641 ath10k_err("failed to get chip id: %d\n", ret);
12eb0879 2642 goto err_iomap;
e01ae68c
KV
2643 }
2644
233eb97f 2645 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
e01ae68c
KV
2646
2647 ath10k_do_pci_sleep(ar);
2648
24cfade1
KV
2649 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2650
e01ae68c 2651 ret = ath10k_core_register(ar, chip_id);
5e3dd157 2652 if (ret) {
1d2b48d6 2653 ath10k_err("failed to register driver core: %d\n", ret);
32270b61 2654 goto err_iomap;
5e3dd157
KV
2655 }
2656
2657 return 0;
2658
5e3dd157
KV
2659err_iomap:
2660 pci_iounmap(pdev, mem);
2661err_master:
2662 pci_clear_master(pdev);
2663err_region:
2664 pci_release_region(pdev, BAR_NUM);
2665err_device:
2666 pci_disable_device(pdev);
2667err_ar:
5e3dd157
KV
2668 ath10k_core_destroy(ar);
2669err_ar_pci:
2670 /* call HIF PCI free here */
2671 kfree(ar_pci);
2672
2673 return ret;
2674}
2675
2676static void ath10k_pci_remove(struct pci_dev *pdev)
2677{
2678 struct ath10k *ar = pci_get_drvdata(pdev);
2679 struct ath10k_pci *ar_pci;
2680
50f87a67 2681 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
5e3dd157
KV
2682
2683 if (!ar)
2684 return;
2685
2686 ar_pci = ath10k_pci_priv(ar);
2687
2688 if (!ar_pci)
2689 return;
2690
2691 tasklet_kill(&ar_pci->msi_fw_err);
2692
2693 ath10k_core_unregister(ar);
5e3dd157 2694
5e3dd157
KV
2695 pci_iounmap(pdev, ar_pci->mem);
2696 pci_release_region(pdev, BAR_NUM);
2697 pci_clear_master(pdev);
2698 pci_disable_device(pdev);
2699
2700 ath10k_core_destroy(ar);
2701 kfree(ar_pci);
2702}
2703
5e3dd157
KV
2704MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2705
2706static struct pci_driver ath10k_pci_driver = {
2707 .name = "ath10k_pci",
2708 .id_table = ath10k_pci_id_table,
2709 .probe = ath10k_pci_probe,
2710 .remove = ath10k_pci_remove,
5e3dd157
KV
2711};
2712
2713static int __init ath10k_pci_init(void)
2714{
2715 int ret;
2716
2717 ret = pci_register_driver(&ath10k_pci_driver);
2718 if (ret)
1d2b48d6 2719 ath10k_err("failed to register PCI driver: %d\n", ret);
5e3dd157
KV
2720
2721 return ret;
2722}
2723module_init(ath10k_pci_init);
2724
2725static void __exit ath10k_pci_exit(void)
2726{
2727 pci_unregister_driver(&ath10k_pci_driver);
2728}
2729
2730module_exit(ath10k_pci_exit);
2731
2732MODULE_AUTHOR("Qualcomm Atheros");
2733MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2734MODULE_LICENSE("Dual BSD/GPL");
929417cf 2735MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
5e3dd157 2736MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);