Merge branch 'for-linville' of git://github.com/kvalo/ath
deliverable/linux.git: drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

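/*
 * Example usage (assuming the driver is built as the ath10k_pci module):
 *   modprobe ath10k_pci irq_mode=1    - force legacy (shared) interrupts
 *   modprobe ath10k_pci reset_mode=1  - never fall back to cold reset
 * With 0644 permissions both parameters are also visible under
 * /sys/module/ath10k_pci/parameters/.
 */
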
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";
	else if (ar_pci->num_msi_intrs == 1)
		return "msi";
	else
		return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_CB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len);

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read32(ar, address, data);

	*data = ath10k_pci_read32(ar, address);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data; /* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write32(ar, address, data);

	ath10k_pci_write32(ar, address, data);
	return 0;
}

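/*
 * Chip wake/sleep handling: the host requests the SoC to stay awake by
 * writing PCIE_SOC_WAKE_V_MASK and then polls RTC_STATE until it reports
 * "on" (or PCIE_WAKE_TIMEOUT expires); ath10k_pci_sleep() drops the
 * request again so the target may power down its PCIe core.
 */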
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Regardless how many interrupts were assigned for MSI the first one
	 * is always used for firmware indications (crashes). There's no way to
	 * mask the irq in the device so call disable_irq(). Legacy (shared)
	 * interrupts can be masked on the device though.
	 */
	if (ar_pci->num_msi_intrs > 0)
		disable_irq(ar_pci->pdev->irq);
	else
		ath10k_pci_disable_and_clear_legacy_irq(ar);

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_ce_enable_interrupts(ar);

	/* See comment in ath10k_pci_irq_disable() */
	if (ar_pci->num_msi_intrs > 0)
		enable_irq(ar_pci->pdev->irq);
	else
		ath10k_pci_enable_legacy_irq(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	ath10k_pci_irq_disable(ar);
	ath10k_pci_flush(ar);

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 */
	ath10k_pci_warm_reset(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn(ar, "failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn(ar, "failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_access(ar, pcie_state_targ_addr +
					   offsetof(struct pcie_state, config_flags),
					   pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_alloc_ce(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_pci_free_ce(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
					  ath10k_pci_ce_send_done,
					  ath10k_pci_ce_recv_data);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   pipe_num, ret);
			return ret;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
		   val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	/* reset CE */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* unreset CE */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	ath10k_pci_warm_reset_si0(ar);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
		   val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
		   val);

	msleep(100);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err(ar, "failed to reset target: %d\n", ret);
		goto err;
	}

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err:
	return ret;
}
1875
61c95cea
MK
1876static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
1877{
1878 int i, ret;
1879
1880 /*
 1881 * Sometimes a warm reset succeeds only after a few retries.
 1882 *
 1883 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
 1884 * on the first try.
1885 */
1886 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1887 ret = __ath10k_pci_hif_power_up(ar, false);
1888 if (ret == 0)
1889 break;
1890
7aa7a72a 1891 ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
61c95cea
MK
1892 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
1893 }
1894
1895 return ret;
1896}
1897
fc36e3ff
MK
1898static int ath10k_pci_hif_power_up(struct ath10k *ar)
1899{
1900 int ret;
1901
7aa7a72a 1902 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
50f87a67 1903
fc36e3ff
MK
1904 /*
1905 * Hardware CUS232 version 2 has some issues with cold reset and the
1906 * preferred (and safer) way to perform a device reset is through a
1907 * warm reset.
1908 *
61c95cea
MK
 1909 * Warm reset doesn't always work though, so falling back to cold reset
 1910 * may be necessary.
fc36e3ff 1911 */
61c95cea 1912 ret = ath10k_pci_hif_power_up_warm(ar);
fc36e3ff 1913 if (ret) {
7aa7a72a 1914 ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
fc36e3ff
MK
1915 ret);
1916
35098463
KV
1917 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1918 return ret;
1919
7aa7a72a 1920 ath10k_warn(ar, "trying cold reset\n");
35098463 1921
fc36e3ff
MK
1922 ret = __ath10k_pci_hif_power_up(ar, true);
1923 if (ret) {
7aa7a72a 1924 ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
fc36e3ff
MK
1925 ret);
1926 return ret;
1927 }
1928 }
1929
1930 return 0;
1931}
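/*
 * Usage sketch (illustrative, not driver code): the warm-then-cold fallback
 * above is steered by this file's reset_mode module parameter, assuming the
 * module is loaded as ath10k_pci:
 *
 *	modprobe ath10k_pci reset_mode=0	(default: warm reset, cold fallback)
 *	modprobe ath10k_pci reset_mode=1	(warm reset only, no cold fallback)
 */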
1932
8c5c5368
MK
1933static void ath10k_pci_hif_power_down(struct ath10k *ar)
1934{
7aa7a72a 1935 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
50f87a67 1936
fc36e3ff 1937 ath10k_pci_warm_reset(ar);
8c5c5368
MK
1938}
1939
8cd13cad
MK
1940#ifdef CONFIG_PM
1941
1942#define ATH10K_PCI_PM_CONTROL 0x44
1943
1944static int ath10k_pci_hif_suspend(struct ath10k *ar)
1945{
1946 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1947 struct pci_dev *pdev = ar_pci->pdev;
1948 u32 val;
1949
1950 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1951
1952 if ((val & 0x000000ff) != 0x3) {
1953 pci_save_state(pdev);
1954 pci_disable_device(pdev);
1955 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1956 (val & 0xffffff00) | 0x03);
1957 }
1958
1959 return 0;
1960}
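/*
 * The low byte of the PM control word at config offset 0x44 is treated here
 * as the PCI power-state field (0x0 = D0, 0x3 = D3hot), so the block above
 * only enters D3hot if the device is not already there. A minimal equivalent
 * sketch using the generic PCI PM helpers (an alternative, not what this
 * driver does):
 *
 *	pci_save_state(pdev);
 *	pci_disable_device(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */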
1961
1962static int ath10k_pci_hif_resume(struct ath10k *ar)
1963{
1964 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1965 struct pci_dev *pdev = ar_pci->pdev;
1966 u32 val;
1967
1968 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1969
1970 if ((val & 0x000000ff) != 0) {
1971 pci_restore_state(pdev);
1972 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1973 val & 0xffffff00);
1974 /*
1975 * Suspend/Resume resets the PCI configuration space,
1976 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1977 * to keep PCI Tx retries from interfering with C3 CPU state
1978 */
1979 pci_read_config_dword(pdev, 0x40, &val);
1980
1981 if ((val & 0x0000ff00) != 0)
1982 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1983 }
1984
1985 return 0;
1986}
1987#endif
1988
5e3dd157 1989static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
726346fc 1990 .tx_sg = ath10k_pci_hif_tx_sg,
5e3dd157
KV
1991 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1992 .start = ath10k_pci_hif_start,
1993 .stop = ath10k_pci_hif_stop,
1994 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1995 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1996 .send_complete_check = ath10k_pci_hif_send_complete_check,
e799bbff 1997 .set_callbacks = ath10k_pci_hif_set_callbacks,
5e3dd157 1998 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
8c5c5368
MK
1999 .power_up = ath10k_pci_hif_power_up,
2000 .power_down = ath10k_pci_hif_power_down,
8cd13cad
MK
2001#ifdef CONFIG_PM
2002 .suspend = ath10k_pci_hif_suspend,
2003 .resume = ath10k_pci_hif_resume,
2004#endif
5e3dd157
KV
2005};
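/*
 * These ops are not called directly by the core; they are reached through
 * thin inline wrappers in hif.h. A sketch of the assumed wrapper shape:
 *
 *	static inline int ath10k_hif_power_up(struct ath10k *ar)
 *	{
 *		return ar->hif.ops->power_up(ar);
 *	}
 */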
2006
2007static void ath10k_pci_ce_tasklet(unsigned long ptr)
2008{
87263e5b 2009 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
5e3dd157
KV
2010 struct ath10k_pci *ar_pci = pipe->ar_pci;
2011
2012 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2013}
2014
2015static void ath10k_msi_err_tasklet(unsigned long data)
2016{
2017 struct ath10k *ar = (struct ath10k *)data;
2018
5c771e74 2019 if (!ath10k_pci_has_fw_crashed(ar)) {
7aa7a72a 2020 ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
5c771e74
MK
2021 return;
2022 }
2023
2024 ath10k_pci_fw_crashed_clear(ar);
2025 ath10k_pci_fw_crashed_dump(ar);
5e3dd157
KV
2026}
2027
2028/*
2029 * Handler for a per-engine interrupt on a PARTICULAR CE.
2030 * This is used in cases where each CE has a private MSI interrupt.
2031 */
2032static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2033{
2034 struct ath10k *ar = arg;
2035 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2036 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2037
e5742672 2038 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
7aa7a72a
MK
2039 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2040 ce_id);
5e3dd157
KV
2041 return IRQ_HANDLED;
2042 }
2043
2044 /*
2045 * NOTE: We are able to derive ce_id from irq because we
 2046 * use a one-to-one mapping for CEs 0..5.
 2047 * CEs 6 & 7 do not use interrupts at all.
2048 *
2049 * This mapping must be kept in sync with the mapping
2050 * used by firmware.
2051 */
2052 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2053 return IRQ_HANDLED;
2054}
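/*
 * Worked example of the irq -> ce_id mapping above, assuming the usual
 * vector layout from pci.h (MSI_ASSIGN_FW = 0, MSI_ASSIGN_CE_INITIAL = 1):
 * with pdev->irq == 40, the fw-error handler owns irq 40 and CEs 0..5 own
 * irqs 41..46, so irq 43 gives ce_id = 43 - 40 - 1 = 2.
 */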
2055
2056static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2057{
2058 struct ath10k *ar = arg;
2059 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2060
2061 tasklet_schedule(&ar_pci->msi_fw_err);
2062 return IRQ_HANDLED;
2063}
2064
2065/*
2066 * Top-level interrupt handler for all PCI interrupts from a Target.
2067 * When a block of MSI interrupts is allocated, this top-level handler
2068 * is not used; instead, we directly call the correct sub-handler.
2069 */
2070static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2071{
2072 struct ath10k *ar = arg;
2073 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2074
2075 if (ar_pci->num_msi_intrs == 0) {
e539887b
MK
2076 if (!ath10k_pci_irq_pending(ar))
2077 return IRQ_NONE;
2078
2685218b 2079 ath10k_pci_disable_and_clear_legacy_irq(ar);
5e3dd157
KV
2080 }
2081
2082 tasklet_schedule(&ar_pci->intr_tq);
2083
2084 return IRQ_HANDLED;
2085}
2086
5c771e74 2087static void ath10k_pci_tasklet(unsigned long data)
ab977bd0
MK
2088{
2089 struct ath10k *ar = (struct ath10k *)data;
5c771e74 2090 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ab977bd0 2091
5c771e74
MK
2092 if (ath10k_pci_has_fw_crashed(ar)) {
2093 ath10k_pci_fw_crashed_clear(ar);
0e9848c0 2094 ath10k_pci_fw_crashed_dump(ar);
ab977bd0
MK
2095 return;
2096 }
2097
5e3dd157
KV
2098 ath10k_ce_per_engine_service_any(ar);
2099
2685218b
MK
2100 /* Re-enable legacy irq that was disabled in the irq handler */
2101 if (ar_pci->num_msi_intrs == 0)
2102 ath10k_pci_enable_legacy_irq(ar);
5e3dd157
KV
2103}
2104
fc15ca13 2105static int ath10k_pci_request_irq_msix(struct ath10k *ar)
5e3dd157
KV
2106{
2107 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 2108 int ret, i;
5e3dd157
KV
2109
2110 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2111 ath10k_pci_msi_fw_handler,
2112 IRQF_SHARED, "ath10k_pci", ar);
591ecdb8 2113 if (ret) {
7aa7a72a 2114 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
591ecdb8 2115 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
5e3dd157 2116 return ret;
591ecdb8 2117 }
5e3dd157
KV
2118
2119 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2120 ret = request_irq(ar_pci->pdev->irq + i,
2121 ath10k_pci_per_engine_handler,
2122 IRQF_SHARED, "ath10k_pci", ar);
2123 if (ret) {
7aa7a72a 2124 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
5e3dd157
KV
2125 ar_pci->pdev->irq + i, ret);
2126
87b1423b
MK
2127 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2128 free_irq(ar_pci->pdev->irq + i, ar);
5e3dd157 2129
87b1423b 2130 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
5e3dd157
KV
2131 return ret;
2132 }
2133 }
2134
5e3dd157
KV
2135 return 0;
2136}
2137
fc15ca13 2138static int ath10k_pci_request_irq_msi(struct ath10k *ar)
5e3dd157
KV
2139{
2140 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2141 int ret;
2142
5e3dd157
KV
2143 ret = request_irq(ar_pci->pdev->irq,
2144 ath10k_pci_interrupt_handler,
2145 IRQF_SHARED, "ath10k_pci", ar);
fc15ca13 2146 if (ret) {
7aa7a72a 2147 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
fc15ca13 2148 ar_pci->pdev->irq, ret);
5e3dd157
KV
2149 return ret;
2150 }
2151
5e3dd157
KV
2152 return 0;
2153}
2154
fc15ca13 2155static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
5e3dd157
KV
2156{
2157 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2158 int ret;
2159
2160 ret = request_irq(ar_pci->pdev->irq,
2161 ath10k_pci_interrupt_handler,
2162 IRQF_SHARED, "ath10k_pci", ar);
f3782744 2163 if (ret) {
7aa7a72a 2164 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
fc15ca13 2165 ar_pci->pdev->irq, ret);
5e3dd157 2166 return ret;
f3782744 2167 }
5e3dd157 2168
5e3dd157
KV
2169 return 0;
2170}
2171
fc15ca13
MK
2172static int ath10k_pci_request_irq(struct ath10k *ar)
2173{
2174 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2175
fc15ca13
MK
2176 switch (ar_pci->num_msi_intrs) {
2177 case 0:
2178 return ath10k_pci_request_irq_legacy(ar);
2179 case 1:
2180 return ath10k_pci_request_irq_msi(ar);
2181 case MSI_NUM_REQUEST:
2182 return ath10k_pci_request_irq_msix(ar);
2183 }
5e3dd157 2184
7aa7a72a 2185 ath10k_warn(ar, "unknown irq configuration upon request\n");
fc15ca13 2186 return -EINVAL;
5e3dd157
KV
2187}
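/*
 * num_msi_intrs doubles as the interrupt-mode selector throughout this file:
 * 0 selects shared legacy INTx, 1 a single MSI vector, and MSI_NUM_REQUEST a
 * block of vectors with one for the fw-error line and one per interrupting
 * CE (the _msix variant above). The same switch shape appears again in
 * ath10k_pci_deinit_irq().
 */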
2188
fc15ca13
MK
2189static void ath10k_pci_free_irq(struct ath10k *ar)
2190{
2191 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2192 int i;
2193
 2194 /* There's at least one interrupt regardless of whether it's legacy INTR,
 2195 * MSI or MSI-X */
2196 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2197 free_irq(ar_pci->pdev->irq + i, ar);
2198}
2199
2200static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
5e3dd157
KV
2201{
2202 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157
KV
2203 int i;
2204
fc15ca13 2205 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
5e3dd157 2206 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
fc15ca13 2207 (unsigned long)ar);
5e3dd157
KV
2208
2209 for (i = 0; i < CE_COUNT; i++) {
2210 ar_pci->pipe_info[i].ar_pci = ar_pci;
fc15ca13 2211 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
5e3dd157
KV
2212 (unsigned long)&ar_pci->pipe_info[i]);
2213 }
fc15ca13
MK
2214}
2215
2216static int ath10k_pci_init_irq(struct ath10k *ar)
2217{
2218 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2219 int ret;
5e3dd157 2220
fc15ca13 2221 ath10k_pci_init_irq_tasklets(ar);
5e3dd157 2222
403d627b 2223 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
7aa7a72a
MK
2224 ath10k_info(ar, "limiting irq mode to: %d\n",
2225 ath10k_pci_irq_mode);
5e3dd157 2226
fc15ca13 2227 /* Try MSI-X */
0edf2577 2228 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
cfe9c45b 2229 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
5ad6867c
AG
2230 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2231 ar_pci->num_msi_intrs);
2232 if (ret > 0)
cfe9c45b 2233 return 0;
5e3dd157 2234
cfe9c45b 2235 /* fall-through */
5e3dd157
KV
2236 }
2237
fc15ca13 2238 /* Try MSI */
cfe9c45b
MK
2239 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2240 ar_pci->num_msi_intrs = 1;
2241 ret = pci_enable_msi(ar_pci->pdev);
5e3dd157 2242 if (ret == 0)
cfe9c45b 2243 return 0;
5e3dd157 2244
cfe9c45b 2245 /* fall-through */
5e3dd157
KV
2246 }
2247
fc15ca13
MK
2248 /* Try legacy irq
2249 *
 2250 * A potential race occurs here: the CORE_BASE write
 2251 * depends on the target correctly decoding the AXI address, but
 2252 * the host won't know when the target has written its BAR to CORE_CTRL.
 2253 * This write might get lost if the target has NOT written its BAR.
 2254 * For now, fix the race by repeating the write in the
 2255 * synchronization check below. */
2256 ar_pci->num_msi_intrs = 0;
5e3dd157 2257
fc15ca13
MK
2258 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2259 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
fc15ca13
MK
2260
2261 return 0;
5e3dd157
KV
2262}
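/*
 * pci_enable_msi_range(pdev, minvec, maxvec) returns the number of MSI
 * vectors actually allocated (> 0) or a negative errno; requesting
 * minvec == maxvec, as above, makes the allocation all-or-nothing. A minimal
 * sketch of the same pattern in isolation (illustrative only):
 *
 *	int nvec = pci_enable_msi_range(ar_pci->pdev, MSI_NUM_REQUEST,
 *					MSI_NUM_REQUEST);
 *	if (nvec > 0)
 *		ar_pci->num_msi_intrs = nvec;
 *	else
 *		... fall back to a single MSI, then to legacy INTx ...
 */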
2263
c0c378f9 2264static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
5e3dd157 2265{
fc15ca13
MK
2266 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2267 0);
5e3dd157
KV
2268}
2269
fc15ca13 2270static int ath10k_pci_deinit_irq(struct ath10k *ar)
5e3dd157
KV
2271{
2272 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 2273
fc15ca13
MK
2274 switch (ar_pci->num_msi_intrs) {
2275 case 0:
c0c378f9
MK
2276 ath10k_pci_deinit_irq_legacy(ar);
2277 return 0;
fc15ca13
MK
2278 case 1:
2279 /* fall-through */
2280 case MSI_NUM_REQUEST:
5e3dd157 2281 pci_disable_msi(ar_pci->pdev);
fc15ca13 2282 return 0;
bb8b621a
AG
2283 default:
2284 pci_disable_msi(ar_pci->pdev);
fc15ca13
MK
2285 }
2286
7aa7a72a 2287 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
fc15ca13 2288 return -EINVAL;
5e3dd157
KV
2289}
2290
d7fb47f5 2291static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
5e3dd157
KV
2292{
2293 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0399eca8 2294 unsigned long timeout;
0399eca8 2295 u32 val;
5e3dd157 2296
7aa7a72a 2297 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
5e3dd157 2298
0399eca8
KV
2299 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2300
2301 do {
2302 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2303
7aa7a72a
MK
2304 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2305 val);
50f87a67 2306
0399eca8
KV
2307 /* target should never return this */
2308 if (val == 0xffffffff)
2309 continue;
2310
7710cd2e
MK
2311 /* the device has crashed so don't bother trying anymore */
2312 if (val & FW_IND_EVENT_PENDING)
2313 break;
2314
0399eca8
KV
2315 if (val & FW_IND_INITIALIZED)
2316 break;
2317
5e3dd157
KV
2318 if (ar_pci->num_msi_intrs == 0)
2319 /* Fix potential race by repeating CORE_BASE writes */
c947a9e1
MK
2320 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2321 PCIE_INTR_ENABLE_ADDRESS,
2322 PCIE_INTR_FIRMWARE_MASK |
2323 PCIE_INTR_CE_MASK_ALL);
0399eca8 2324
5e3dd157 2325 mdelay(10);
0399eca8 2326 } while (time_before(jiffies, timeout));
5e3dd157 2327
6a4f6e1d 2328 if (val == 0xffffffff) {
7aa7a72a 2329 ath10k_err(ar, "failed to read device register, device is gone\n");
c0c378f9 2330 return -EIO;
6a4f6e1d
MK
2331 }
2332
7710cd2e 2333 if (val & FW_IND_EVENT_PENDING) {
7aa7a72a 2334 ath10k_warn(ar, "device has crashed during init\n");
5c771e74 2335 ath10k_pci_fw_crashed_clear(ar);
0e9848c0 2336 ath10k_pci_fw_crashed_dump(ar);
c0c378f9 2337 return -ECOMM;
7710cd2e
MK
2338 }
2339
6a4f6e1d 2340 if (!(val & FW_IND_INITIALIZED)) {
7aa7a72a 2341 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
0399eca8 2342 val);
c0c378f9 2343 return -ETIMEDOUT;
5e3dd157
KV
2344 }
2345
7aa7a72a 2346 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
c0c378f9 2347 return 0;
5e3dd157
KV
2348}
2349
fc36e3ff 2350static int ath10k_pci_cold_reset(struct ath10k *ar)
5e3dd157 2351{
c0c378f9 2352 int i;
5e3dd157
KV
2353 u32 val;
2354
7aa7a72a 2355 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
5e3dd157
KV
2356
2357 /* Put Target, including PCIe, into RESET. */
e479ed43 2358 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 2359 val |= 1;
e479ed43 2360 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2361
2362 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2363 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2364 RTC_STATE_COLD_RESET_MASK)
2365 break;
2366 msleep(1);
2367 }
2368
2369 /* Pull Target, including PCIe, out of RESET. */
2370 val &= ~1;
e479ed43 2371 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157
KV
2372
2373 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
e479ed43 2374 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
5e3dd157
KV
2375 RTC_STATE_COLD_RESET_MASK))
2376 break;
2377 msleep(1);
2378 }
2379
7aa7a72a 2380 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
50f87a67 2381
5b2589fc 2382 return 0;
5e3dd157
KV
2383}
2384
2986e3ef 2385static int ath10k_pci_claim(struct ath10k *ar)
5e3dd157 2386{
2986e3ef
MK
2387 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 struct pci_dev *pdev = ar_pci->pdev;
2389 u32 lcr_val;
2390 int ret;
5e3dd157
KV
2391
2392 pci_set_drvdata(pdev, ar);
2393
5e3dd157
KV
2394 ret = pci_enable_device(pdev);
2395 if (ret) {
7aa7a72a 2396 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2986e3ef 2397 return ret;
5e3dd157
KV
2398 }
2399
5e3dd157
KV
2400 ret = pci_request_region(pdev, BAR_NUM, "ath");
2401 if (ret) {
7aa7a72a 2402 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2986e3ef 2403 ret);
5e3dd157
KV
2404 goto err_device;
2405 }
2406
2986e3ef 2407 /* Target expects 32-bit DMA. Enforce it. */
5e3dd157
KV
2408 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2409 if (ret) {
7aa7a72a 2410 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
5e3dd157
KV
2411 goto err_region;
2412 }
2413
2414 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2415 if (ret) {
7aa7a72a 2416 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2986e3ef 2417 ret);
5e3dd157
KV
2418 goto err_region;
2419 }
2420
5e3dd157
KV
2421 pci_set_master(pdev);
2422
2986e3ef 2423 /* Workaround: Disable ASPM */
5e3dd157
KV
2424 pci_read_config_dword(pdev, 0x80, &lcr_val);
2425 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2426
2427 /* Arrange for access to Target SoC registers. */
2986e3ef
MK
2428 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2429 if (!ar_pci->mem) {
7aa7a72a 2430 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
5e3dd157
KV
2431 ret = -EIO;
2432 goto err_master;
2433 }
2434
7aa7a72a 2435 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2986e3ef
MK
2436 return 0;
2437
2438err_master:
2439 pci_clear_master(pdev);
2440
2441err_region:
2442 pci_release_region(pdev, BAR_NUM);
2443
2444err_device:
2445 pci_disable_device(pdev);
2446
2447 return ret;
2448}
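/*
 * The two 32-bit mask calls above can also be written with the combined DMA
 * helper; a hedged equivalent sketch (not what this file does):
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		goto err_region;
 */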
2449
2450static void ath10k_pci_release(struct ath10k *ar)
2451{
2452 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2453 struct pci_dev *pdev = ar_pci->pdev;
2454
2455 pci_iounmap(pdev, ar_pci->mem);
2456 pci_release_region(pdev, BAR_NUM);
2457 pci_clear_master(pdev);
2458 pci_disable_device(pdev);
2459}
2460
2461static int ath10k_pci_probe(struct pci_dev *pdev,
2462 const struct pci_device_id *pci_dev)
2463{
2464 int ret = 0;
2465 struct ath10k *ar;
2466 struct ath10k_pci *ar_pci;
2467 u32 chip_id;
2468
2986e3ef
MK
2469 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2470 &ath10k_pci_hif_ops);
2471 if (!ar) {
7aa7a72a 2472 dev_err(&pdev->dev, "failed to allocate core\n");
2986e3ef
MK
2473 return -ENOMEM;
2474 }
2475
7aa7a72a
MK
2476 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2477
2986e3ef
MK
2478 ar_pci = ath10k_pci_priv(ar);
2479 ar_pci->pdev = pdev;
2480 ar_pci->dev = &pdev->dev;
2481 ar_pci->ar = ar;
5e3dd157
KV
2482
2483 spin_lock_init(&ar_pci->ce_lock);
728f95ee
MK
2484 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2485 (unsigned long)ar);
5e3dd157 2486
2986e3ef 2487 ret = ath10k_pci_claim(ar);
e01ae68c 2488 if (ret) {
7aa7a72a 2489 ath10k_err(ar, "failed to claim device: %d\n", ret);
2986e3ef 2490 goto err_core_destroy;
e01ae68c
KV
2491 }
2492
c0c378f9 2493 ret = ath10k_pci_wake(ar);
e01ae68c 2494 if (ret) {
7aa7a72a 2495 ath10k_err(ar, "failed to wake up: %d\n", ret);
2986e3ef 2496 goto err_release;
e01ae68c 2497 }
e01ae68c 2498
233eb97f 2499 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
c0c378f9 2500 if (chip_id == 0xffffffff) {
7aa7a72a 2501 ath10k_err(ar, "failed to get chip id\n");
c0c378f9
MK
2502 goto err_sleep;
2503 }
e01ae68c 2504
25d0dbcb
MK
2505 ret = ath10k_pci_alloc_ce(ar);
2506 if (ret) {
7aa7a72a
MK
2507 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2508 ret);
c0c378f9 2509 goto err_sleep;
25d0dbcb
MK
2510 }
2511
403d627b
MK
2512 ath10k_pci_ce_deinit(ar);
2513
2514 ret = ath10k_ce_disable_interrupts(ar);
2515 if (ret) {
7aa7a72a 2516 ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
403d627b
MK
2517 ret);
2518 goto err_free_ce;
25d0dbcb
MK
2519 }
2520
5c771e74
MK
2521 /* Workaround: There's no known way to mask all possible interrupts via
 2522 * device CSR. The only way to make sure the device doesn't assert
 2523 * interrupts is to reset it. Interrupts are then disabled on the host
2524 * after handlers are registered.
2525 */
2526 ath10k_pci_warm_reset(ar);
24cfade1 2527
403d627b 2528 ret = ath10k_pci_init_irq(ar);
5e3dd157 2529 if (ret) {
7aa7a72a 2530 ath10k_err(ar, "failed to init irqs: %d\n", ret);
25d0dbcb 2531 goto err_free_ce;
5e3dd157
KV
2532 }
2533
7aa7a72a 2534 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
403d627b
MK
2535 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2536 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2537
5c771e74
MK
2538 ret = ath10k_pci_request_irq(ar);
2539 if (ret) {
7aa7a72a 2540 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
5c771e74
MK
2541 goto err_deinit_irq;
2542 }
2543
2544 /* This shouldn't race as the device has been reset above. */
2545 ath10k_pci_irq_disable(ar);
2546
e01ae68c 2547 ret = ath10k_core_register(ar, chip_id);
5e3dd157 2548 if (ret) {
7aa7a72a 2549 ath10k_err(ar, "failed to register driver core: %d\n", ret);
5c771e74 2550 goto err_free_irq;
5e3dd157
KV
2551 }
2552
2553 return 0;
2554
5c771e74
MK
2555err_free_irq:
2556 ath10k_pci_free_irq(ar);
2557
403d627b
MK
2558err_deinit_irq:
2559 ath10k_pci_deinit_irq(ar);
2560
25d0dbcb
MK
2561err_free_ce:
2562 ath10k_pci_free_ce(ar);
2986e3ef 2563
c0c378f9
MK
2564err_sleep:
2565 ath10k_pci_sleep(ar);
2986e3ef
MK
2566
2567err_release:
2568 ath10k_pci_release(ar);
2569
e7b54194 2570err_core_destroy:
5e3dd157 2571 ath10k_core_destroy(ar);
5e3dd157
KV
2572
2573 return ret;
2574}
2575
2576static void ath10k_pci_remove(struct pci_dev *pdev)
2577{
2578 struct ath10k *ar = pci_get_drvdata(pdev);
2579 struct ath10k_pci *ar_pci;
2580
7aa7a72a 2581 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
5e3dd157
KV
2582
2583 if (!ar)
2584 return;
2585
2586 ar_pci = ath10k_pci_priv(ar);
2587
2588 if (!ar_pci)
2589 return;
2590
5e3dd157 2591 ath10k_core_unregister(ar);
5c771e74 2592 ath10k_pci_free_irq(ar);
403d627b
MK
2593 ath10k_pci_deinit_irq(ar);
2594 ath10k_pci_ce_deinit(ar);
25d0dbcb 2595 ath10k_pci_free_ce(ar);
c0c378f9 2596 ath10k_pci_sleep(ar);
2986e3ef 2597 ath10k_pci_release(ar);
5e3dd157 2598 ath10k_core_destroy(ar);
5e3dd157
KV
2599}
2600
5e3dd157
KV
2601MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2602
2603static struct pci_driver ath10k_pci_driver = {
2604 .name = "ath10k_pci",
2605 .id_table = ath10k_pci_id_table,
2606 .probe = ath10k_pci_probe,
2607 .remove = ath10k_pci_remove,
5e3dd157
KV
2608};
2609
2610static int __init ath10k_pci_init(void)
2611{
2612 int ret;
2613
2614 ret = pci_register_driver(&ath10k_pci_driver);
2615 if (ret)
7aa7a72a
MK
2616 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2617 ret);
5e3dd157
KV
2618
2619 return ret;
2620}
2621module_init(ath10k_pci_init);
2622
2623static void __exit ath10k_pci_exit(void)
2624{
2625 pci_unregister_driver(&ath10k_pci_driver);
2626}
2627
2628module_exit(ath10k_pci_exit);
2629
2630MODULE_AUTHOR("Qualcomm Atheros");
2631MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2632MODULE_LICENSE("Dual BSD/GPL");
24c88f78 2633MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
5e3dd157 2634MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);