/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
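
/*
 * Illustrative usage sketch (an assumed HIF-style caller, not code from
 * this file): register per-CE callbacks, keep the destination ring stocked
 * with anonymous receive buffers, and send via ath10k_ce_send(), passing a
 * per-transfer context (for example an skb pointer) that is echoed back on
 * completion. The callback name and the skb-based context are assumptions
 * made purely for illustration.
 *
 *	static void example_send_done(struct ath10k_ce_pipe *pipe)
 *	{
 *		void *ctx;
 *		u32 addr;
 *		unsigned int nbytes, id;
 *
 *		while (ath10k_ce_completed_send_next(pipe, &ctx, &addr,
 *						     &nbytes, &id) == 0)
 *			dev_kfree_skb_any(ctx);
 *	}
 *
 *	ath10k_ce_send_cb_register(pipe, example_send_done, 0);
 *	ath10k_ce_recv_buf_enqueue(pipe, rx_skb, rx_paddr);
 *	ath10k_ce_send(pipe, tx_skb, tx_paddr, tx_skb->len, transfer_id, 0);
 */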

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
							u32 ce_ctrl_addr,
							unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						       u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						 u32 ce_ctrl_addr,
						 unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						 u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr   = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags  = __cpu_to_le16(desc_flags);

	/* Copy the shadow descriptor into the actual ring descriptor */
	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}
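
/*
 * Illustrative sketch of the "gather" path described in the overview above
 * (an assumed caller, not code from this file): the caller holds ce_lock,
 * queues every fragment but the last with CE_SEND_FLAG_GATHER so the
 * hardware write index is not pushed yet, and lets the final fragment
 * (sent without the flag) publish the whole batch. If an intermediate
 * enqueue fails, each already-queued fragment is unwound with
 * __ath10k_ce_send_revert() below.
 *
 *	spin_lock_bh(&ar_pci->ce_lock);
 *	for (i = 0; i < n_frags; i++) {
 *		unsigned int flags = (i < n_frags - 1) ? CE_SEND_FLAG_GATHER : 0;
 *
 *		ret = ath10k_ce_send_nolock(pipe, ctx, frag_paddr[i],
 *					    frag_len[i], transfer_id, flags);
 *		if (ret) {
 *			while (i--)
 *				__ath10k_ce_send_revert(pipe);
 *			break;
 *		}
 *	}
 *	spin_unlock_bh(&ar_pci->ce_lock);
 */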

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the index register has been
	 * updated) that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;
	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;
	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	ath10k_pci_sleep(ar);

	return 0;
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->per_transfer_context, 0,
	       nentries * sizeof(*src_ring->per_transfer_context));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->per_transfer_context, 0,
	       nentries * sizeof(*dest_ring->per_transfer_context));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		dma_free_coherent(ar->dev,
				  (nentries * sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  src_ring->base_addr_owner_space,
				  src_ring->base_addr_ce_space);
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Initialize the memory to 0 to prevent garbage data from
	 * crashing the system when firmware is being downloaded.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	spin_unlock_bh(&ar_pci->ce_lock);

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

out:
	ath10k_pci_sleep(ar);

	return ret;
}
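
/*
 * Illustrative (hypothetical) attribute set for one pipe, showing the
 * struct ce_attr fields consumed by the alloc/init paths in this file.
 * The numeric values and the name example_host_ce_attr are assumptions
 * for illustration only and are not taken from this driver:
 *
 *	static const struct ce_attr example_host_ce_attr = {
 *		.flags = 0,
 *		.src_nentries = 16,
 *		.src_sz_max = 256,
 *		.dest_nentries = 0,
 *	};
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, &example_host_ce_attr);
 *	if (ret == 0)
 *		ret = ath10k_ce_init_pipe(ar, ce_id, &example_host_ce_attr);
 */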

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);

	ath10k_pci_sleep(ar);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}