/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};
enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};
static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000

#define QCA988X_2_0_DEVICE_ID   (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
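/* Note: host_ce_config_wlan above is the host's view of each copy
 * engine (ring sizes, interrupt policy). The target_ce_config_wlan
 * table below is the firmware's view of the same pipes and is written
 * into target memory by ath10k_pci_init_config(); the two tables must
 * be kept in sync. */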
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise interrupt can not be
         * really cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                 PCIE_INTR_ENABLE_ADDRESS);
}
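/* Note: PCIe writes are posted and may linger in intermediate buffers
 * after the MMIO write returns. Reading back any register on the same
 * path forces the posted writes to complete, which is why both helpers
 * above end with a dummy read of the interrupt enable register. */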
static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->early_irq_tasklet);

        return IRQ_HANDLED;
}
static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
         * interrupt from irq vector is triggered in all cases for FW
         * indication/errors */
        ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
                          IRQF_SHARED, "ath10k_pci (early)", ar);
        if (ret) {
                ath10k_warn("failed to request early irq: %d\n", ret);
                return ret;
        }

        return 0;
}
static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
        free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_warn("failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        return ret;
}
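/* The diag read above, in outline: allocate a DMA-coherent bounce
 * buffer, post it as a receive buffer on the diagnostic CE, then ask
 * the CE to "send" from the target address; the engine copies target
 * memory into the bounce buffer up to DIAG_TRANSFER_LIMIT bytes per
 * iteration. The write path below mirrors this in reverse. */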
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data; /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn("failed to write diag value at 0x%x: %d\n",
                            address, ret);

        return ret;
}
/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}
void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
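/* Wake/sleep are reference counted: keep_awake_count tracks nested
 * ath10k_do_pci_wake() callers and the SOC_WAKE register is only
 * touched on the 0->1 and 1->0 transitions, so paired callers can nest
 * freely without fighting over the device power state. */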
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (transfer_context == NULL)
                        continue;

                cb->tx_completion(ar, transfer_context, transfer_id);
        }
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;
        int err;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                err = ath10k_pci_post_rx_pipe(pipe_info, 1);
                if (unlikely(err)) {
                        /* FIXME: retry */
                        ath10k_warn("failed to replenish CE rx ring %d: %d\n",
                                    pipe_info->pipe_num, err);
                }

                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                cb->rx_completion(ar, skb, pipe_info->pipe_num);
        }
}
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        int err, i;

        spin_lock_bh(&ar_pci->ce_lock);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto unlock;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto unlock;
        }

        /* `i` is equal to `n_items -1` after for() */

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);

unlock:
        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}
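/* Every item except the last is queued with CE_SEND_FLAG_GATHER, so
 * the copy engine treats the scatter list as one logical transfer; the
 * final, unflagged descriptor is what makes the whole batch visible to
 * the target. */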
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);
        tasklet_kill(&ar_pci->early_irq_tasklet);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services. So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}
static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request irq: %d\n", ret);
                goto err_early_irq;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
         * driver lifecycle. */
        ret_early = ath10k_pci_request_early_irq(ar);
        if (ret_early)
                ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

        return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!netbuf)
                        continue;

                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_deinit_pipe(ar, i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_request_early_irq(ar);
        if (ret)
                ath10k_warn("failed to re-enable early irq: %d\n", ret);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_warm_reset(ar);

        ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (completion_done(&xfer->done))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}
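/* BMI runs during early boot before CE interrupts are set up, so the
 * wait loop above polls the send and receive pipes directly instead of
 * sleeping on the completion; completion_done() is merely a
 * non-blocking check of the flag set by the poll handlers. */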
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 0,             /* could be moved to 3 (share with WMI) */
        },
        {
                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },
        {
                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 0,
        },
        {
                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },
        {
                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 4,
        },
        {
                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },

        /* (Additions here) */

        {                               /* Must be last */
                 0,
                 0,
                 0,
        },
};
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        int ret;
        u32 core_ctrl;

        ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
                                          CORE_CTRL_ADDRESS,
                                          &core_ctrl);
        if (ret) {
                ath10k_warn("failed to read core_ctrl: %d\n", ret);
                return ret;
        }

        /* A_INUM_FIRMWARE interrupt to Target CPU */
        core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                           CORE_CTRL_ADDRESS, core_ctrl);
        if (ret) {
                ath10k_warn("failed to set target CPU interrupt mask: %d\n",
                            ret);
                return ret;
        }

        return 0;
}
static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
                                          &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr),
                                          &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
                                        sizeof(target_ce_config_wlan));
        if (ret != 0) {
                ath10k_err("Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map),
                                          &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err("Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err("Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                        target_service_to_ce_map_wlan,
                                        sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err("Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags),
                                          &pcie_config_flags);
        if (ret != 0) {
                ath10k_err("Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
                                        offsetof(struct pcie_state, config_flags),
                                        &pcie_config_flags,
                                        sizeof(pcie_config_flags));
        if (ret != 0) {
                ath10k_err("Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to get early alloc val: %d\n", ret);
                return ret;
        }

        /* first bank is switched to IRAM */
        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
                         HI_EARLY_ALLOC_MAGIC_MASK);
        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);

        ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to set early alloc val: %d\n", ret);
                return ret;
        }

        /* Tell Target to proceed with initialization */
        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

        ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to get option val: %d\n", ret);
                return ret;
        }

        flag2_value |= HI_OPTION_EARLY_CFG_DONE;

        ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to set option val: %d\n", ret);
                return ret;
        }

        return 0;
}
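/* init_config in outline: locate the target-side struct pcie_state via
 * hi_interconnect_state, push target_ce_config_wlan and
 * target_service_to_ce_map_wlan through the diagnostic window, disable
 * PCIe L1, set up early memory allocation, and finally flag
 * HI_OPTION_EARLY_CFG_DONE so firmware proceeds with initialization. */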
static int ath10k_pci_alloc_ce(struct ath10k *ar)
{
        int i, ret;

        for (i = 0; i < CE_COUNT; i++) {
                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
                if (ret) {
                        ath10k_err("failed to allocate copy engine pipe %d: %d\n",
                                   i, ret);
                        return ret;
                }
        }

        return 0;
}
static void ath10k_pci_free_ce(struct ath10k *ar)
{
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_free_pipe(ar, i);
}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];

                ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
                if (ret) {
                        ath10k_err("failed to initialize copy engine pipe %d: %d\n",
                                   pipe_num, ret);
                        return ret;
                }

                if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the ultimate CE for
                         * diagnostic Window support
                         */
                        ar_pci->ce_diag = pipe_info->ce_hdl;
                        continue;
                }

                pipe_info->buf_sz = (size_t) (attr->src_sz_max);
        }

        return 0;
}
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 fw_indicator;

        ath10k_pci_wake(ar);

        fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

        if (fw_indicator & FW_IND_EVENT_PENDING) {
                /* ACK: clear Target-side pending event */
                ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_indicator & ~FW_IND_EVENT_PENDING);

                if (ar_pci->started) {
                        ath10k_pci_hif_dump_area(ar);
                } else {
                        /*
                         * Probable Target failure before we're prepared
                         * to handle it. Generally unexpected.
                         */
                        ath10k_warn("early firmware event indicated\n");
                }
        }

        ath10k_pci_sleep(ar);
}
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
        u32 val;

        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

        msleep(10);

        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

        msleep(10);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
        int ret = 0;
        u32 val;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");

        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target: %d\n", ret);
                return ret;
        }

        /* debug */
        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_CAUSE_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                CPU_INTR_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
                   val);

        /* disable pending irqs */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS, 0);

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_CLR_ADDRESS, ~0);

        msleep(100);

        /* clear fw indicator */
        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

        /* clear target LF timer interrupts */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_LF_TIMER_CONTROL0_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
                           SOC_LF_TIMER_CONTROL0_ADDRESS,
                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

        /* reset CE */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val | SOC_RESET_CONTROL_CE_RST_MASK);
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        msleep(10);

        /* unreset CE */
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        msleep(10);

        ath10k_pci_warm_reset_si0(ar);

        /* debug */
        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_CAUSE_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                CPU_INTR_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
                   val);

        /* CPU warm reset */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
                                SOC_RESET_CONTROL_ADDRESS);
        ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

        msleep(100);

        ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

        ath10k_do_pci_sleep(ar);
        return ret;
}
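/* Warm reset in outline: mask and clear pending PCIe interrupts, clear
 * the firmware indicator and LF timer, pulse the CE reset line, clear
 * the SI0 assert via ath10k_pci_warm_reset_si0(), then warm-reset the
 * target CPU. Unlike cold reset this keeps the PCIe link up, so no
 * link retraining is needed afterwards. */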
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const char *irq_mode;
        int ret;

        /*
         * Bring the target up cleanly.
         *
         * The target may be in an undefined state with an AUX-powered Target
         * and a Host in WoW mode. If the Host crashes, loses power, or is
         * restarted (without unloading the driver) then the Target is left
         * (aux) powered and running. On a subsequent driver load, the Target
         * is in an unexpected state. We try to catch that here in order to
         * reset the Target and retry the probe.
         */
        if (cold_reset)
                ret = ath10k_pci_cold_reset(ar);
        else
                ret = ath10k_pci_warm_reset(ar);

        if (ret) {
                ath10k_err("failed to reset target: %d\n", ret);
                goto err;
        }

        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                /* Force AWAKE forever */
                ath10k_do_pci_wake(ar);

        ret = ath10k_pci_ce_init(ar);
        if (ret) {
                ath10k_err("failed to initialize CE: %d\n", ret);
                goto err_ps;
        }

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret) {
                ath10k_err("failed to disable CE interrupts: %d\n", ret);
                goto err_ce;
        }

        ret = ath10k_pci_init_irq(ar);
        if (ret) {
                ath10k_err("failed to init irqs: %d\n", ret);
                goto err_ce;
        }

        ret = ath10k_pci_request_early_irq(ar);
        if (ret) {
                ath10k_err("failed to request early irq: %d\n", ret);
                goto err_deinit_irq;
        }

        ret = ath10k_pci_wait_for_target_init(ar);
        if (ret) {
                ath10k_err("failed to wait for target to init: %d\n", ret);
                goto err_free_early_irq;
        }

        ret = ath10k_pci_init_config(ar);
        if (ret) {
                ath10k_err("failed to setup init config: %d\n", ret);
                goto err_free_early_irq;
        }

        ret = ath10k_pci_wake_target_cpu(ar);
        if (ret) {
                ath10k_err("could not wake up target CPU: %d\n", ret);
                goto err_free_early_irq;
        }

        if (ar_pci->num_msi_intrs > 1)
                irq_mode = "MSI-X";
        else if (ar_pci->num_msi_intrs == 1)
                irq_mode = "MSI";
        else
                irq_mode = "legacy";

        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
                ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
                            irq_mode, ath10k_pci_irq_mode,
                            ath10k_pci_reset_mode);

        return 0;

err_free_early_irq:
        ath10k_pci_free_early_irq(ar);
err_deinit_irq:
        ath10k_pci_deinit_irq(ar);
err_ce:
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_warm_reset(ar);
err_ps:
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
err:
        return ret;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
        int ret;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");

        /*
         * Hardware CUS232 version 2 has some issues with cold reset and the
         * preferred (and safer) way to perform a device reset is through a
         * warm reset.
         *
         * Warm reset doesn't always work though (notably after a firmware
         * crash) so fall back to cold reset if necessary.
         */
        ret = __ath10k_pci_hif_power_up(ar, false);
        if (ret) {
                ath10k_warn("failed to power up target using warm reset: %d\n",
                            ret);

                if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
                        return ret;

                ath10k_warn("trying cold reset\n");

                ret = __ath10k_pci_hif_power_up(ar, true);
                if (ret) {
                        ath10k_err("failed to power up target using cold reset too (%d)\n",
                                   ret);
                        return ret;
                }
        }

        return 0;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_deinit_irq(ar);
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_warm_reset(ar);

        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
}
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;

        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

        if ((val & 0x000000ff) != 0x3) {
                pci_save_state(pdev);
                pci_disable_device(pdev);
                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                                       (val & 0xffffff00) | 0x03);
        }

        return 0;
}
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;

        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

        if ((val & 0x000000ff) != 0) {
                pci_restore_state(pdev);
                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                                       val & 0xffffff00);
                /*
                 * Suspend/Resume resets the PCI configuration space,
                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
                 * to keep PCI Tx retries from interfering with C3 CPU state
                 */
                pci_read_config_dword(pdev, 0x40, &val);

                if ((val & 0x0000ff00) != 0)
                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
        }

        return 0;
}
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
        .tx_sg                  = ath10k_pci_hif_tx_sg,
        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
        .start                  = ath10k_pci_hif_start,
        .stop                   = ath10k_pci_hif_stop,
        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
        .send_complete_check    = ath10k_pci_hif_send_complete_check,
        .set_callbacks          = ath10k_pci_hif_set_callbacks,
        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
        .power_up               = ath10k_pci_hif_power_up,
        .power_down             = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
        .suspend                = ath10k_pci_hif_suspend,
        .resume                 = ath10k_pci_hif_resume,
#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
        struct ath10k_pci *ar_pci = pipe->ar_pci;

        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}
static void ath10k_msi_err_tasklet(unsigned long data)
{
        struct ath10k *ar = (struct ath10k *)data;

        ath10k_pci_fw_interrupt_handler(ar);
}
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
                ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
                return IRQ_HANDLED;
        }

        /*
         * NOTE: We are able to derive ce_id from irq because we
         * use a one-to-one mapping for CE's 0..5.
         * CE's 6 & 7 do not use interrupts at all.
         *
         * This mapping must be kept in sync with the mapping
         * used by firmware.
         */
        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
        return IRQ_HANDLED;
}
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        tasklet_schedule(&ar_pci->msi_fw_err);
        return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->intr_tq);

        return IRQ_HANDLED;
}
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
        struct ath10k *ar = (struct ath10k *)data;
        u32 fw_ind;
        int ret;

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn("failed to wake target in early irq tasklet: %d\n",
                            ret);
                return;
        }

        fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
        if (fw_ind & FW_IND_EVENT_PENDING) {
                ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_ind & ~FW_IND_EVENT_PENDING);

                /* Some structures are unavailable during early boot or at
                 * driver teardown so just print that the device has crashed. */
                ath10k_warn("device crashed - no diagnostics available\n");
        }

        ath10k_pci_sleep(ar);
        ath10k_pci_enable_legacy_irq(ar);
}
static void ath10k_pci_tasklet(unsigned long data)
{
        struct ath10k *ar = (struct ath10k *)data;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
        ath10k_ce_per_engine_service_any(ar);

        /* Re-enable legacy irq that was disabled in the irq handler */
        if (ar_pci->num_msi_intrs == 0)
                ath10k_pci_enable_legacy_irq(ar);
}
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, i;

        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
                          ath10k_pci_msi_fw_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret) {
                ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
                return ret;
        }

        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
                ret = request_irq(ar_pci->pdev->irq + i,
                                  ath10k_pci_per_engine_handler,
                                  IRQF_SHARED, "ath10k_pci", ar);
                if (ret) {
                        ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
                                    ar_pci->pdev->irq + i, ret);

                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
                                free_irq(ar_pci->pdev->irq + i, ar);

                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
                        return ret;
                }
        }

        return 0;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret) {
                ath10k_warn("failed to request MSI irq %d: %d\n",
                            ar_pci->pdev->irq, ret);
                return ret;
        }

        return 0;
}
static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret) {
                ath10k_warn("failed to request legacy irq %d: %d\n",
                            ar_pci->pdev->irq, ret);
                return ret;
        }

        return 0;
}
static int ath10k_pci_request_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        switch (ar_pci->num_msi_intrs) {
        case 0:
                return ath10k_pci_request_irq_legacy(ar);
        case 1:
                return ath10k_pci_request_irq_msi(ar);
        case MSI_NUM_REQUEST:
                return ath10k_pci_request_irq_msix(ar);
        }

        ath10k_warn("unknown irq configuration upon request\n");
        return -EINVAL;
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        /* There's at least one interrupt regardless of whether it's legacy
         * INTR or MSI or MSI-X */
        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                free_irq(ar_pci->pdev->irq + i, ar);
}
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
                     (unsigned long)ar);
        tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
                     (unsigned long)ar);

        for (i = 0; i < CE_COUNT; i++) {
                ar_pci->pipe_info[i].ar_pci = ar_pci;
                tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
                             (unsigned long)&ar_pci->pipe_info[i]);
        }
}
static int ath10k_pci_init_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
                                       ar_pci->features);
        int ret;

        ath10k_pci_init_irq_tasklets(ar);

        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
            !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
                ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

        /* Try MSI-X */
        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
                ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
                ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
                                           ar_pci->num_msi_intrs);
                if (ret > 0)
                        return 0;

                /* fall-through */
        }

        /* Try MSI */
        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
                ar_pci->num_msi_intrs = 1;
                ret = pci_enable_msi(ar_pci->pdev);
                if (ret == 0)
                        return 0;

                /* fall-through */
        }

        /* Try legacy irq
         *
         * A potential race occurs here: The CORE_BASE write
         * depends on target correctly decoding AXI address but
         * host won't know when target writes BAR to CORE_CTRL.
         * This write might get lost if target has NOT written BAR.
         * For now, fix the race by repeating the write in below
         * synchronization checking. */
        ar_pci->num_msi_intrs = 0;

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn("failed to wake target: %d\n", ret);
                return ret;
        }

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
        ath10k_pci_sleep(ar);

        return 0;
}
static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
        int ret;

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn("failed to wake target: %d\n", ret);
                return ret;
        }

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_sleep(ar);

        return 0;
}
static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        switch (ar_pci->num_msi_intrs) {
        case 0:
                return ath10k_pci_deinit_irq_legacy(ar);
        case 1:
                /* fall-through */
        case MSI_NUM_REQUEST:
                pci_disable_msi(ar_pci->pdev);
                return 0;
        default:
                pci_disable_msi(ar_pci->pdev);
        }

        ath10k_warn("unknown irq configuration upon deinit\n");
        return -EINVAL;
}
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long timeout;
        int ret;
        u32 val;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target for init: %d\n", ret);
                return ret;
        }

        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

        do {
                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

                ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);

                /* target should never return this */
                if (val == 0xffffffff)
                        continue;

                /* the device has crashed so don't bother trying anymore */
                if (val & FW_IND_EVENT_PENDING)
                        break;

                if (val & FW_IND_INITIALIZED)
                        break;

                if (ar_pci->num_msi_intrs == 0)
                        /* Fix potential race by repeating CORE_BASE writes */
                        ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
                                               PCIE_INTR_FIRMWARE_MASK |
                                               PCIE_INTR_CE_MASK_ALL);

                mdelay(10);
        } while (time_before(jiffies, timeout));

        if (val == 0xffffffff) {
                ath10k_err("failed to read device register, device is gone\n");
                ret = -EIO;
                goto out;
        }

        if (val & FW_IND_EVENT_PENDING) {
                ath10k_warn("device has crashed during init\n");
                ret = -ECOMM;
                goto out;
        }

        if (!(val & FW_IND_INITIALIZED)) {
                ath10k_err("failed to receive initialized event from target: %08x\n",
                           val);
                ret = -ETIMEDOUT;
                goto out;
        }

        ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");

out:
        ath10k_pci_sleep(ar);
        return ret;
}
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
        int i, ret;
        u32 val;

        ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");

        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target: %d\n",
                           ret);
                return ret;
        }

        /* Put Target, including PCIe, into RESET. */
        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
        val |= 1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                          RTC_STATE_COLD_RESET_MASK)
                        break;
                msleep(1);
        }

        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                            RTC_STATE_COLD_RESET_MASK))
                        break;
                msleep(1);
        }

        ath10k_do_pci_sleep(ar);

        ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");

        return 0;
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
        int i;

        for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
                if (!test_bit(i, ar_pci->features))
                        continue;

                switch (i) {
                case ATH10K_PCI_FEATURE_MSI_X:
                        ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
                        break;
                case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
                        ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
                        break;
                }
        }
}
static int ath10k_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *pci_dev)
{
        void __iomem *mem;
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        u32 lcr_val, chip_id;

        ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");

        ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
        if (ar_pci == NULL)
                return -ENOMEM;

        ar_pci->pdev = pdev;
        ar_pci->dev = &pdev->dev;

        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
                break;
        default:
                ret = -ENODEV;
                ath10k_err("Unknown device ID: %d\n", pci_dev->device);
                goto err_ar_pci;
        }

        if (ath10k_pci_target_ps)
                set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

        ath10k_pci_dump_features(ar_pci);

        ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
        if (!ar) {
                ath10k_err("failed to create driver core\n");
                ret = -EINVAL;
                goto err_ar_pci;
        }

        ar_pci->ar = ar;
        atomic_set(&ar_pci->keep_awake_count, 0);

        pci_set_drvdata(pdev, ar);

        /*
         * Without any knowledge of the Host, the Target may have been reset or
         * power cycled and its Config Space may no longer reflect the PCI
         * address space that was assigned earlier by the PCI infrastructure.
         * Refresh it now.
         */
        ret = pci_assign_resource(pdev, BAR_NUM);
        if (ret) {
                ath10k_err("failed to assign PCI space: %d\n", ret);
                goto err_ar;
        }

        ret = pci_enable_device(pdev);
        if (ret) {
                ath10k_err("failed to enable PCI device: %d\n", ret);
                goto err_ar;
        }

        /* Request MMIO resources */
        ret = pci_request_region(pdev, BAR_NUM, "ath");
        if (ret) {
                ath10k_err("failed to request MMIO region: %d\n", ret);
                goto err_device;
        }

        /*
         * Target structures have a limit of 32 bit DMA pointers.
         * DMA pointers can be wider than 32 bits by default on some systems.
         */
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
                goto err_region;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err("failed to set consistent DMA mask to 32-bit\n");
                goto err_region;
        }

        /* Set bus master bit in PCI_COMMAND to enable DMA */
        pci_set_master(pdev);

        /*
         * Temporary FIX: disable ASPM
         * Will be removed after the OTP is programmed
         */
        pci_read_config_dword(pdev, 0x80, &lcr_val);
        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

        /* Arrange for access to Target SoC registers. */
        mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!mem) {
                ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
                ret = -EIO;
                goto err_master;
        }

        ar_pci->mem = mem;

        spin_lock_init(&ar_pci->ce_lock);

        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("Failed to get chip id: %d\n", ret);
                goto err_iomap;
        }

        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

        ath10k_do_pci_sleep(ar);

        ret = ath10k_pci_alloc_ce(ar);
        if (ret) {
                ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
                goto err_iomap;
        }

        ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err("failed to register driver core: %d\n", ret);
                goto err_free_ce;
        }

        return 0;

err_free_ce:
        ath10k_pci_free_ce(ar);
err_iomap:
        pci_iounmap(pdev, mem);
err_master:
        pci_clear_master(pdev);
err_region:
        pci_release_region(pdev, BAR_NUM);
err_device:
        pci_disable_device(pdev);
err_ar:
        ath10k_core_destroy(ar);
err_ar_pci:
        /* call HIF PCI free here */
        kfree(ar_pci);

        return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;

        ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");

        if (!ar)
                return;

        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci)
                return;

        tasklet_kill(&ar_pci->msi_fw_err);

        ath10k_core_unregister(ar);
        ath10k_pci_free_ce(ar);

        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
        pci_clear_master(pdev);
        pci_disable_device(pdev);

        ath10k_core_destroy(ar);
        kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
        .name = "ath10k_pci",
        .id_table = ath10k_pci_id_table,
        .probe = ath10k_pci_probe,
        .remove = ath10k_pci_remove,
};
static int __init ath10k_pci_init(void)
{
        int ret;

        ret = pci_register_driver(&ath10k_pci_driver);
        if (ret)
                ath10k_err("failed to register PCI driver: %d\n", ret);

        return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
        pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);