/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_1_0_DEVICE_ID	(0xabcd)
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);

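/*
 * Reader's aid for the table below: each struct ce_attr initializer is, in
 * order, { flags, priority, src_nentries, src_sz_max, dest_nentries,
 * reserved } -- assuming the field layout declared in ce.h. CE0, for
 * example, has 16 source-ring entries of at most 256 bytes each and no
 * destination ring.
 */
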
static const struct ce_attr host_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
	/* could be moved to share CE3 */
	/* target->host HTT + HTC control */
	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
	/* target->host WMI */
	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
	/* host->target WMI */
	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
	/* host->target HTT */
	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
	/* unused */
	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* Target autonomous hif_memcpy */
	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* ce_diag, the Diagnostic Window */
	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};

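/*
 * Likewise, each struct ce_pipe_config initializer below is, in order,
 * { pipenum, pipedir, nentries, nbytes_max, flags, reserved } -- assuming
 * the target-side pipe config layout in ce.h. Unlike host_ce_config_wlan
 * above, this table describes the rings from the firmware's point of view.
 */
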
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
	/* target->host HTT + HTC control */
	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
	/* target->host WMI */
	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target WMI */
	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target HTT */
	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
	/* NB: 50% of src nentries, since tx has 2 frags */
	/* unused */
	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* Reserved for target autonomous hif_memcpy */
	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
	/* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

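/*
 * Example usage (a sketch; the real caller is ath10k_pci_hif_dump_area()
 * further down, which reads a host-interest word through this window):
 *
 *	u32 addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 *	u32 area;
 *
 *	ret = ath10k_pci_diag_read_mem(ar, addr, &area, sizeof(u32));
 */
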
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data,
						sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space.
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

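/*
 * The two access helpers above are commonly paired for a read-modify-write
 * of a target register, as ath10k_pci_wake_target_cpu() does further down
 * (SOME_MASK here is a hypothetical mask, purely for illustration):
 *
 *	ath10k_pci_diag_read_access(ar, address, &val);
 *	val |= SOME_MASK;
 *	ath10k_pci_diag_write_access(ar, address, val);
 */
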
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wakeup target\n");
}

void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}

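/*
 * keep_awake_count makes wake/sleep calls nestable: only the 0 -> 1
 * transition forces AWAKE and only the 1 -> 0 transition allows sleep.
 * A typical register access therefore brackets the MMIO, exactly as
 * ath10k_pci_diag_read_access() does above:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, address);
 *	ath10k_pci_sleep(ar);
 */
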
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ce_state *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}

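/*
 * Callers are expected to have DMA-mapped nbuf already (skb_cb->paddr is
 * used as the CE source address) and to keep the buffer 4-byte aligned;
 * unaligned lengths only warn. When the pipe is full the caller may retry
 * once completions have replenished num_sends_allowed.
 */
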
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

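/*
 * Worked example of the heuristic above: CE3 (host->target WMI) has 32
 * source entries in host_ce_config_wlan, so the relatively expensive CE
 * register poll is skipped while more than 16 sends are still allowed on
 * that pipe.
 */
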
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct hif_ce_pipe_info *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(struct ath10k_pci_compl),
					GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

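/*
 * The pre-allocated completion pool above is sized to the worst case: one
 * ath10k_pci_compl per ring entry in each direction. CE1, for instance,
 * gets 512 recv completions, while CE3 gets 32 send completions (one
 * source entry is held back, hence num_sends_allowed = src_nentries - 1).
 */
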
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct hif_ce_pipe_info *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = (struct sk_buff *)compl->transfer_context;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	do {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			send_done = 1;
		} else {
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
		}

		compl->send_or_recv = HIF_CE_COMPLETE_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	} while (1);

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

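/*
 * compl_processing acts as a single-consumer gate: a second context that
 * sees it set simply returns, trusting the current owner to drain
 * compl_process before clearing the flag. Note that each recv completion
 * also reposts one rx buffer (ath10k_pci_post_rx_pipe(pipe, 1)) so the
 * rings stay full while traffic flows.
 */
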
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and WMI.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

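/*
 * Summary of the mapping above: WMI control and data use pipe 3 (ul) and
 * pipe 2 (dl); HTT data uses the polled pipe 4 (ul) and shares pipe 1
 * (dl); HTC control and raw streams use pipe 0 (ul) and pipe 1 (dl).
 */
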
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer.
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
			ar_pci->msg_callbacks_current.tx_completion(ar,
								    netbuf,
								    id);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct hif_ce_pipe_info *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

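/*
 * BMI exchange in short: copy the request into a DMA-safe bounce buffer,
 * optionally post a response buffer on the BMI rx CE, send the request on
 * the BMI tx CE, then block up to BMI_COMMUNICATION_TIMEOUT_HZ for the
 * completion that ath10k_pci_bmi_send_done()/..._recv_data() below signal.
 */
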
static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{ /* Must be last */
		 0,
		 0,
		 0,
	},
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					      CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					       CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("Unable to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == ar_pci->ce_count - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag =
			ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_send_done, 0);

	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_recv_data);

	return 0;
}

static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ath10k_pci_device_reset(ar);

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err;

	if (ath10k_target_ps) {
		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
	} else {
		/* Force AWAKE forever */
		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
		ath10k_do_pci_wake(ar);
	}

	ret = ath10k_pci_ce_init(ar);
	if (ret)
		goto err_ps;

	ret = ath10k_pci_init_config(ar);
	if (ret)
		goto err_ce;

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU (%d)\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!ath10k_target_ps)
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_pci_ce_deinit(ar);
	if (!ath10k_target_ps)
		ath10k_do_pci_sleep(ar);
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head		= ath10k_pci_hif_send_head,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);

	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/*
		 * IMPORTANT: INTR_CLR register has to be set after
		 * INTR_ENABLE is set to 0, otherwise interrupt can not be
		 * really cleared.
		 */
		iowrite32(0, ar_pci->mem +
			  (SOC_CORE_BASE_ADDRESS |
			   PCIE_INTR_ENABLE_ADDRESS));
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_CLR_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}

static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;
	int i;

	ret = pci_enable_msi_block(ar_pci->pdev, num);
	if (ret)
		return ret;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret)
		return ret;

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("request_irq(%d) failed %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			pci_disable_msi(ar_pci->pdev);
			return ret;
		}
	}

	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
	return 0;
}

static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = pci_enable_msi(ar_pci->pdev);
	if (ret < 0)
		return ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0) {
		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	ath10k_info("MSI interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int num = MSI_NUM_REQUEST;
	int ret;
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long) ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr,
			     ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}

	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
		num = 1;

	if (num > 1) {
		ret = ath10k_pci_start_intr_msix(ar, num);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
		num = 1;
	}

	if (num == 1) {
		ret = ath10k_pci_start_intr_msi(ar);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
			    ret);
		num = 0;
	}

	ret = ath10k_pci_start_intr_legacy(ar);

exit:
	ar_pci->num_msi_intrs = num;
	ar_pci->ce_count = CE_COUNT;
	return ret;
}

static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}

static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}

static void ath10k_pci_device_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					    RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

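/*
 * Cold reset sequence in short: wake the target, assert SOC_GLOBAL_RESET,
 * wait for RTC to report COLD_RESET, deassert, wait for the bit to clear
 * again, then let the SoC sleep. Each wait is bounded by
 * ATH_PCI_RESET_WAIT_MAX polls.
 */
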
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
			break;
		}
	}
}

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_1_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
		break;
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("ath10k_core_create failed!\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	/* Enable QCA988X_1.0 HW workarounds */
	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
		spin_lock_init(&ar_pci->hw_v1_workaround_lock);

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("cannot assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("cannot enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("PCI MMIO reservation error: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("32-bit DMA not available: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("cannot enable 32-bit consistent DMA\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("PCI iomap error\n");
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ar_pci->cacheline_sz = dma_get_cache_alignment();

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err_iomap;
	}

	ret = ath10k_pci_hif_power_up(ar);
	if (ret) {
		ath10k_err("could not start pci hif (%d)\n", ret);
		goto err_intr;
	}

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_hif;
	}

	return 0;

err_hif:
	ath10k_pci_hif_power_down(ar);
err_intr:
	ath10k_pci_stop_intr(ar);
err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	pci_set_drvdata(pdev, NULL);
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);
	ath10k_pci_hif_power_down(ar);
	ath10k_pci_stop_intr(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}

#if defined(CONFIG_PM_SLEEP)

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;
	u32 val;
	int ret, retval;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return -ENODEV;

	ar_pci = ath10k_pci_priv(ar);
	if (!ar_pci)
		return -ENODEV;

	if (ath10k_core_target_suspend(ar))
		return -EBUSY;

	ret = wait_event_interruptible_timeout(ar->event_queue,
					       ar->is_target_paused == true,
					       1 * HZ);
	if (ret < 0) {
		ath10k_warn("suspend interrupted (%d)\n", ret);
		retval = ret;
		goto resume;
	} else if (ret == 0) {
		ath10k_warn("suspend timed out - target pause event never came\n");
		retval = -EIO;
		goto resume;
	}

	/*
	 * Reset is_target_paused so the next suspend can wait for the pause
	 * event again; otherwise it stays true, the wait is skipped, and the
	 * target asserts because the host has already suspended.
	 */
	ar->is_target_paused = false;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
resume:
	ret = ath10k_core_target_resume(ar);
	if (ret)
		ath10k_warn("could not resume (%d)\n", ret);

	return retval;
}

static int ath10k_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;
	int ret;
	u32 val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return -ENODEV;

	ar_pci = ath10k_pci_priv(ar);
	if (!ar_pci)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_warn("cannot enable PCI device: %d\n", ret);
		return ret;
	}

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state.
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	ret = ath10k_core_target_resume(ar);
	if (ret)
		ath10k_warn("target resume failed: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
			 ath10k_pci_suspend,
			 ath10k_pci_resume);

#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)

#else

#define ATH10K_PCI_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
	.driver.pm = ATH10K_PCI_PM_OPS,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("pci_register_driver failed [%d]\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);