/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 #include <asm/unaligned.h>
23 #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
25 static void htc_prep_send_pkt(struct htc_packet
*packet
, u8 flags
, int ctrl0
,
28 struct htc_frame_hdr
*hdr
;
30 packet
->buf
-= HTC_HDR_LENGTH
;
31 hdr
= (struct htc_frame_hdr
*)packet
->buf
;
34 put_unaligned((u16
)packet
->act_len
, &hdr
->payld_len
);
36 hdr
->eid
= packet
->endpoint
;
41 static void htc_reclaim_txctrl_buf(struct htc_target
*target
,
42 struct htc_packet
*pkt
)
44 spin_lock_bh(&target
->htc_lock
);
45 list_add_tail(&pkt
->list
, &target
->free_ctrl_txbuf
);
46 spin_unlock_bh(&target
->htc_lock
);
49 static struct htc_packet
*htc_get_control_buf(struct htc_target
*target
,
52 struct htc_packet
*packet
= NULL
;
53 struct list_head
*buf_list
;
55 buf_list
= tx
? &target
->free_ctrl_txbuf
: &target
->free_ctrl_rxbuf
;
57 spin_lock_bh(&target
->htc_lock
);
59 if (list_empty(buf_list
)) {
60 spin_unlock_bh(&target
->htc_lock
);
64 packet
= list_first_entry(buf_list
, struct htc_packet
, list
);
65 list_del(&packet
->list
);
66 spin_unlock_bh(&target
->htc_lock
);
69 packet
->buf
= packet
->buf_start
+ HTC_HDR_LENGTH
;
74 static void htc_tx_comp_update(struct htc_target
*target
,
75 struct htc_endpoint
*endpoint
,
76 struct htc_packet
*packet
)
78 packet
->completion
= NULL
;
79 packet
->buf
+= HTC_HDR_LENGTH
;
84 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
85 packet
->status
, packet
->endpoint
, packet
->act_len
,
86 packet
->info
.tx
.cred_used
);
88 /* on failure to submit, reclaim credits for this packet */
89 spin_lock_bh(&target
->tx_lock
);
90 endpoint
->cred_dist
.cred_to_dist
+=
91 packet
->info
.tx
.cred_used
;
92 endpoint
->cred_dist
.txq_depth
= get_queue_depth(&endpoint
->txq
);
94 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "ctxt:0x%p dist:0x%p\n",
95 target
->cred_dist_cntxt
, &target
->cred_dist_list
);
97 ath6k_credit_distribute(target
->cred_dist_cntxt
,
98 &target
->cred_dist_list
,
99 HTC_CREDIT_DIST_SEND_COMPLETE
);
101 spin_unlock_bh(&target
->tx_lock
);
104 static void htc_tx_complete(struct htc_endpoint
*endpoint
,
105 struct list_head
*txq
)
110 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
111 "send complete ep %d, (%d pkts)\n",
112 endpoint
->eid
, get_queue_depth(txq
));
114 ath6kl_tx_complete(endpoint
->target
->dev
->ar
, txq
);
117 static void htc_tx_comp_handler(struct htc_target
*target
,
118 struct htc_packet
*packet
)
120 struct htc_endpoint
*endpoint
= &target
->endpoint
[packet
->endpoint
];
121 struct list_head container
;
123 htc_tx_comp_update(target
, endpoint
, packet
);
124 INIT_LIST_HEAD(&container
);
125 list_add_tail(&packet
->list
, &container
);
127 htc_tx_complete(endpoint
, &container
);
130 static void htc_async_tx_scat_complete(struct htc_target
*target
,
131 struct hif_scatter_req
*scat_req
)
133 struct htc_endpoint
*endpoint
;
134 struct htc_packet
*packet
;
135 struct list_head tx_compq
;
138 INIT_LIST_HEAD(&tx_compq
);
140 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
141 "htc_async_tx_scat_complete total len: %d entries: %d\n",
142 scat_req
->len
, scat_req
->scat_entries
);
144 if (scat_req
->status
)
145 ath6kl_err("send scatter req failed: %d\n", scat_req
->status
);
147 packet
= scat_req
->scat_list
[0].packet
;
148 endpoint
= &target
->endpoint
[packet
->endpoint
];
150 /* walk through the scatter list and process */
151 for (i
= 0; i
< scat_req
->scat_entries
; i
++) {
152 packet
= scat_req
->scat_list
[i
].packet
;
158 packet
->status
= scat_req
->status
;
159 htc_tx_comp_update(target
, endpoint
, packet
);
160 list_add_tail(&packet
->list
, &tx_compq
);
163 /* free scatter request */
164 hif_scatter_req_add(target
->dev
->ar
, scat_req
);
166 /* complete all packets */
167 htc_tx_complete(endpoint
, &tx_compq
);
170 static int htc_issue_send(struct htc_target
*target
, struct htc_packet
*packet
)
174 u32 padded_len
, send_len
;
176 if (!packet
->completion
)
179 send_len
= packet
->act_len
+ HTC_HDR_LENGTH
;
181 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "%s: transmit len : %d (%s)\n",
182 __func__
, send_len
, sync
? "sync" : "async");
184 padded_len
= CALC_TXRX_PADDED_LEN(target
->dev
, send_len
);
186 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
187 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
189 target
->dev
->ar
->mbox_info
.htc_addr
,
190 sync
? "sync" : "async");
193 status
= hif_read_write_sync(target
->dev
->ar
,
194 target
->dev
->ar
->mbox_info
.htc_addr
,
195 packet
->buf
, padded_len
,
196 HIF_WR_SYNC_BLOCK_INC
);
198 packet
->status
= status
;
199 packet
->buf
+= HTC_HDR_LENGTH
;
201 status
= hif_write_async(target
->dev
->ar
,
202 target
->dev
->ar
->mbox_info
.htc_addr
,
203 packet
->buf
, padded_len
,
204 HIF_WR_ASYNC_BLOCK_INC
, packet
);
209 static int htc_check_credits(struct htc_target
*target
,
210 struct htc_endpoint
*ep
, u8
*flags
,
211 enum htc_endpoint_id eid
, unsigned int len
,
215 *req_cred
= (len
> target
->tgt_cred_sz
) ?
216 DIV_ROUND_UP(len
, target
->tgt_cred_sz
) : 1;
218 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "creds required:%d got:%d\n",
219 *req_cred
, ep
->cred_dist
.credits
);
221 if (ep
->cred_dist
.credits
< *req_cred
) {
222 if (eid
== ENDPOINT_0
)
225 /* Seek more credits */
226 ep
->cred_dist
.seek_cred
= *req_cred
- ep
->cred_dist
.credits
;
228 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "ctxt:0x%p dist:0x%p\n",
229 target
->cred_dist_cntxt
, &ep
->cred_dist
);
231 ath6k_seek_credits(target
->cred_dist_cntxt
, &ep
->cred_dist
);
233 ep
->cred_dist
.seek_cred
= 0;
235 if (ep
->cred_dist
.credits
< *req_cred
) {
236 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
237 "not enough credits for ep %d - leaving packet in queue\n",
243 ep
->cred_dist
.credits
-= *req_cred
;
244 ep
->ep_st
.cred_cosumd
+= *req_cred
;
246 /* When we are getting low on credits, ask for more */
247 if (ep
->cred_dist
.credits
< ep
->cred_dist
.cred_per_msg
) {
248 ep
->cred_dist
.seek_cred
=
249 ep
->cred_dist
.cred_per_msg
- ep
->cred_dist
.credits
;
251 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "ctxt:0x%p dist:0x%p\n",
252 target
->cred_dist_cntxt
, &ep
->cred_dist
);
254 ath6k_seek_credits(target
->cred_dist_cntxt
, &ep
->cred_dist
);
256 /* see if we were successful in getting more */
257 if (ep
->cred_dist
.credits
< ep
->cred_dist
.cred_per_msg
) {
258 /* tell the target we need credits ASAP! */
259 *flags
|= HTC_FLAGS_NEED_CREDIT_UPDATE
;
260 ep
->ep_st
.cred_low_indicate
+= 1;
261 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "host needs credits\n");
268 static void htc_tx_pkts_get(struct htc_target
*target
,
269 struct htc_endpoint
*endpoint
,
270 struct list_head
*queue
)
274 struct htc_packet
*packet
;
281 if (list_empty(&endpoint
->txq
))
283 packet
= list_first_entry(&endpoint
->txq
, struct htc_packet
,
286 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
287 "got head pkt:0x%p , queue depth: %d\n",
288 packet
, get_queue_depth(&endpoint
->txq
));
290 len
= CALC_TXRX_PADDED_LEN(target
->dev
,
291 packet
->act_len
+ HTC_HDR_LENGTH
);
293 if (htc_check_credits(target
, endpoint
, &flags
,
294 packet
->endpoint
, len
, &req_cred
))
297 /* now we can fully move onto caller's queue */
298 packet
= list_first_entry(&endpoint
->txq
, struct htc_packet
,
300 list_move_tail(&packet
->list
, queue
);
302 /* save the number of credits this packet consumed */
303 packet
->info
.tx
.cred_used
= req_cred
;
305 /* all TX packets are handled asynchronously */
306 packet
->completion
= htc_tx_comp_handler
;
307 packet
->context
= target
;
308 endpoint
->ep_st
.tx_issued
+= 1;
310 /* save send flags */
311 packet
->info
.tx
.flags
= flags
;
312 packet
->info
.tx
.seqno
= endpoint
->seqno
;
317 /* See if the padded tx length falls on a credit boundary */
318 static int htc_get_credit_padding(unsigned int cred_sz
, int *len
,
319 struct htc_endpoint
*ep
)
321 int rem_cred
, cred_pad
;
323 rem_cred
= *len
% cred_sz
;
325 /* No padding needed */
329 if (!(ep
->conn_flags
& HTC_FLGS_TX_BNDL_PAD_EN
))
333 * The transfer consumes a "partial" credit, this
334 * packet cannot be bundled unless we add
335 * additional "dummy" padding (max 255 bytes) to
336 * consume the entire credit.
338 cred_pad
= *len
< cred_sz
? (cred_sz
- *len
) : rem_cred
;
340 if ((cred_pad
> 0) && (cred_pad
<= 255))
343 /* The amount of padding is too large, send as non-bundled */
349 static int htc_setup_send_scat_list(struct htc_target
*target
,
350 struct htc_endpoint
*endpoint
,
351 struct hif_scatter_req
*scat_req
,
353 struct list_head
*queue
)
355 struct htc_packet
*packet
;
356 int i
, len
, rem_scat
, cred_pad
;
359 rem_scat
= target
->dev
->max_tx_bndl_sz
;
361 for (i
= 0; i
< n_scat
; i
++) {
362 scat_req
->scat_list
[i
].packet
= NULL
;
364 if (list_empty(queue
))
367 packet
= list_first_entry(queue
, struct htc_packet
, list
);
368 len
= CALC_TXRX_PADDED_LEN(target
->dev
,
369 packet
->act_len
+ HTC_HDR_LENGTH
);
371 cred_pad
= htc_get_credit_padding(target
->tgt_cred_sz
,
378 if (rem_scat
< len
) {
379 /* exceeds what we can transfer */
385 /* now remove it from the queue */
386 packet
= list_first_entry(queue
, struct htc_packet
, list
);
387 list_del(&packet
->list
);
389 scat_req
->scat_list
[i
].packet
= packet
;
390 /* prepare packet and flag message as part of a send bundle */
391 htc_prep_send_pkt(packet
,
392 packet
->info
.tx
.flags
| HTC_FLAGS_SEND_BUNDLE
,
393 cred_pad
, packet
->info
.tx
.seqno
);
394 scat_req
->scat_list
[i
].buf
= packet
->buf
;
395 scat_req
->scat_list
[i
].len
= len
;
397 scat_req
->len
+= len
;
398 scat_req
->scat_entries
++;
399 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
400 "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
401 i
, packet
, len
, rem_scat
);
404 /* Roll back scatter setup in case of any failure */
405 if (status
|| (scat_req
->scat_entries
< HTC_MIN_HTC_MSGS_TO_BUNDLE
)) {
406 for (i
= scat_req
->scat_entries
- 1; i
>= 0; i
--) {
407 packet
= scat_req
->scat_list
[i
].packet
;
409 packet
->buf
+= HTC_HDR_LENGTH
;
410 list_add(&packet
->list
, queue
);
420 * htc_issue_send_bundle: drain a queue and send as bundles
421 * this function may return without fully draining the queue
424 * 1. scatter resources are exhausted
425 * 2. a message that will consume a partial credit will stop the
426 * bundling process early
427 * 3. we drop below the minimum number of messages for a bundle
429 static void htc_issue_send_bundle(struct htc_endpoint
*endpoint
,
430 struct list_head
*queue
,
431 int *sent_bundle
, int *n_bundle_pkts
)
433 struct htc_target
*target
= endpoint
->target
;
434 struct hif_scatter_req
*scat_req
= NULL
;
435 struct hif_dev_scat_sup_info hif_info
;
436 int n_scat
, n_sent_bundle
= 0, tot_pkts_bundle
= 0;
438 hif_info
= target
->dev
->hif_scat_info
;
441 n_scat
= get_queue_depth(queue
);
442 n_scat
= min(n_scat
, target
->msg_per_bndl_max
);
444 if (n_scat
< HTC_MIN_HTC_MSGS_TO_BUNDLE
)
445 /* not enough to bundle */
448 scat_req
= hif_scatter_req_get(target
->dev
->ar
);
451 /* no scatter resources */
452 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
453 "no more scatter resources\n");
457 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "pkts to scatter: %d\n",
461 scat_req
->scat_entries
= 0;
463 if (htc_setup_send_scat_list(target
, endpoint
, scat_req
,
465 hif_scatter_req_add(target
->dev
->ar
, scat_req
);
469 /* send path is always asynchronous */
470 scat_req
->complete
= htc_async_tx_scat_complete
;
472 tot_pkts_bundle
+= scat_req
->scat_entries
;
474 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
475 "send scatter total bytes: %d , entries: %d\n",
476 scat_req
->len
, scat_req
->scat_entries
);
477 ath6kldev_submit_scat_req(target
->dev
, scat_req
, false);
480 *sent_bundle
= n_sent_bundle
;
481 *n_bundle_pkts
= tot_pkts_bundle
;
482 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "htc_issue_send_bundle (sent:%d)\n",
488 static void htc_tx_from_ep_txq(struct htc_target
*target
,
489 struct htc_endpoint
*endpoint
)
491 struct list_head txq
;
492 struct htc_packet
*packet
;
496 spin_lock_bh(&target
->tx_lock
);
498 endpoint
->tx_proc_cnt
++;
499 if (endpoint
->tx_proc_cnt
> 1) {
500 endpoint
->tx_proc_cnt
--;
501 spin_unlock_bh(&target
->tx_lock
);
502 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "htc_try_send (busy)\n");
507 * drain the endpoint TX queue for transmission as long
508 * as we have enough credits.
510 INIT_LIST_HEAD(&txq
);
514 if (list_empty(&endpoint
->txq
))
517 htc_tx_pkts_get(target
, endpoint
, &txq
);
519 if (list_empty(&txq
))
522 spin_unlock_bh(&target
->tx_lock
);
528 /* try to send a bundle on each pass */
529 if ((target
->tx_bndl_enable
) &&
530 (get_queue_depth(&txq
) >=
531 HTC_MIN_HTC_MSGS_TO_BUNDLE
)) {
532 int temp1
= 0, temp2
= 0;
534 htc_issue_send_bundle(endpoint
, &txq
,
536 bundle_sent
+= temp1
;
537 n_pkts_bundle
+= temp2
;
540 if (list_empty(&txq
))
543 packet
= list_first_entry(&txq
, struct htc_packet
,
545 list_del(&packet
->list
);
547 htc_prep_send_pkt(packet
, packet
->info
.tx
.flags
,
548 0, packet
->info
.tx
.seqno
);
549 htc_issue_send(target
, packet
);
552 spin_lock_bh(&target
->tx_lock
);
554 endpoint
->ep_st
.tx_bundles
+= bundle_sent
;
555 endpoint
->ep_st
.tx_pkt_bundled
+= n_pkts_bundle
;
558 endpoint
->tx_proc_cnt
= 0;
559 spin_unlock_bh(&target
->tx_lock
);
562 static bool htc_try_send(struct htc_target
*target
,
563 struct htc_endpoint
*endpoint
,
564 struct htc_packet
*tx_pkt
)
566 struct htc_ep_callbacks ep_cb
;
568 bool overflow
= false;
570 ep_cb
= endpoint
->ep_cb
;
572 spin_lock_bh(&target
->tx_lock
);
573 txq_depth
= get_queue_depth(&endpoint
->txq
);
574 spin_unlock_bh(&target
->tx_lock
);
576 if (txq_depth
>= endpoint
->max_txq_depth
)
580 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
581 "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
582 endpoint
->eid
, overflow
, txq_depth
,
583 endpoint
->max_txq_depth
);
585 if (overflow
&& ep_cb
.tx_full
) {
586 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
587 "indicating overflowed tx packet: 0x%p\n", tx_pkt
);
589 if (ep_cb
.tx_full(endpoint
->target
, tx_pkt
) ==
590 HTC_SEND_FULL_DROP
) {
591 endpoint
->ep_st
.tx_dropped
+= 1;
596 spin_lock_bh(&target
->tx_lock
);
597 list_add_tail(&tx_pkt
->list
, &endpoint
->txq
);
598 spin_unlock_bh(&target
->tx_lock
);
600 htc_tx_from_ep_txq(target
, endpoint
);
605 static void htc_chk_ep_txq(struct htc_target
*target
)
607 struct htc_endpoint
*endpoint
;
608 struct htc_endpoint_credit_dist
*cred_dist
;
611 * Run through the credit distribution list to see if there are
612 * packets queued. NOTE: no locks need to be taken since the
613 * distribution list is not dynamic (cannot be re-ordered) and we
614 * are not modifying any state.
616 list_for_each_entry(cred_dist
, &target
->cred_dist_list
, list
) {
617 endpoint
= (struct htc_endpoint
*)cred_dist
->htc_rsvd
;
619 spin_lock_bh(&target
->tx_lock
);
620 if (!list_empty(&endpoint
->txq
)) {
621 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
622 "ep %d has %d credits and %d packets in tx queue\n",
624 endpoint
->cred_dist
.credits
,
625 get_queue_depth(&endpoint
->txq
));
626 spin_unlock_bh(&target
->tx_lock
);
628 * Try to start the stalled queue, this list is
629 * ordered by priority. If there are credits
630 * available the highest priority queue will get a
631 * chance to reclaim credits from lower priority
634 htc_tx_from_ep_txq(target
, endpoint
);
635 spin_lock_bh(&target
->tx_lock
);
637 spin_unlock_bh(&target
->tx_lock
);
641 static int htc_setup_tx_complete(struct htc_target
*target
)
643 struct htc_packet
*send_pkt
= NULL
;
646 send_pkt
= htc_get_control_buf(target
, true);
651 if (target
->htc_tgt_ver
>= HTC_VERSION_2P1
) {
652 struct htc_setup_comp_ext_msg
*setup_comp_ext
;
656 (struct htc_setup_comp_ext_msg
*)send_pkt
->buf
;
657 memset(setup_comp_ext
, 0, sizeof(*setup_comp_ext
));
658 setup_comp_ext
->msg_id
=
659 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID
);
661 if (target
->msg_per_bndl_max
> 0) {
662 /* Indicate HTC bundling to the target */
663 flags
|= HTC_SETUP_COMP_FLG_RX_BNDL_EN
;
664 setup_comp_ext
->msg_per_rxbndl
=
665 target
->msg_per_bndl_max
;
668 memcpy(&setup_comp_ext
->flags
, &flags
,
669 sizeof(setup_comp_ext
->flags
));
670 set_htc_pkt_info(send_pkt
, NULL
, (u8
*) setup_comp_ext
,
671 sizeof(struct htc_setup_comp_ext_msg
),
672 ENDPOINT_0
, HTC_SERVICE_TX_PACKET_TAG
);
675 struct htc_setup_comp_msg
*setup_comp
;
676 setup_comp
= (struct htc_setup_comp_msg
*)send_pkt
->buf
;
677 memset(setup_comp
, 0, sizeof(struct htc_setup_comp_msg
));
678 setup_comp
->msg_id
= cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID
);
679 set_htc_pkt_info(send_pkt
, NULL
, (u8
*) setup_comp
,
680 sizeof(struct htc_setup_comp_msg
),
681 ENDPOINT_0
, HTC_SERVICE_TX_PACKET_TAG
);
684 /* we want synchronous operation */
685 send_pkt
->completion
= NULL
;
686 htc_prep_send_pkt(send_pkt
, 0, 0, 0);
687 status
= htc_issue_send(target
, send_pkt
);
689 if (send_pkt
!= NULL
)
690 htc_reclaim_txctrl_buf(target
, send_pkt
);
695 void htc_set_credit_dist(struct htc_target
*target
,
696 struct htc_credit_state_info
*cred_dist_cntxt
,
697 u16 srvc_pri_order
[], int list_len
)
699 struct htc_endpoint
*endpoint
;
702 target
->cred_dist_cntxt
= cred_dist_cntxt
;
704 list_add_tail(&target
->endpoint
[ENDPOINT_0
].cred_dist
.list
,
705 &target
->cred_dist_list
);
707 for (i
= 0; i
< list_len
; i
++) {
708 for (ep
= ENDPOINT_1
; ep
< ENDPOINT_MAX
; ep
++) {
709 endpoint
= &target
->endpoint
[ep
];
710 if (endpoint
->svc_id
== srvc_pri_order
[i
]) {
711 list_add_tail(&endpoint
->cred_dist
.list
,
712 &target
->cred_dist_list
);
716 if (ep
>= ENDPOINT_MAX
) {
723 int htc_tx(struct htc_target
*target
, struct htc_packet
*packet
)
725 struct htc_endpoint
*endpoint
;
726 struct list_head queue
;
728 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
729 "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
730 packet
->endpoint
, packet
->buf
, packet
->act_len
);
732 if (packet
->endpoint
>= ENDPOINT_MAX
) {
737 endpoint
= &target
->endpoint
[packet
->endpoint
];
739 if (!htc_try_send(target
, endpoint
, packet
)) {
740 packet
->status
= (target
->htc_flags
& HTC_OP_STATE_STOPPING
) ?
741 -ECANCELED
: -ENOSPC
;
742 INIT_LIST_HEAD(&queue
);
743 list_add(&packet
->list
, &queue
);
744 htc_tx_complete(endpoint
, &queue
);
750 /* flush endpoint TX queue */
751 void htc_flush_txep(struct htc_target
*target
,
752 enum htc_endpoint_id eid
, u16 tag
)
754 struct htc_packet
*packet
, *tmp_pkt
;
755 struct list_head discard_q
, container
;
756 struct htc_endpoint
*endpoint
= &target
->endpoint
[eid
];
758 if (!endpoint
->svc_id
) {
763 /* initialize the discard queue */
764 INIT_LIST_HEAD(&discard_q
);
766 spin_lock_bh(&target
->tx_lock
);
768 list_for_each_entry_safe(packet
, tmp_pkt
, &endpoint
->txq
, list
) {
769 if ((tag
== HTC_TX_PACKET_TAG_ALL
) ||
770 (tag
== packet
->info
.tx
.tag
))
771 list_move_tail(&packet
->list
, &discard_q
);
774 spin_unlock_bh(&target
->tx_lock
);
776 list_for_each_entry_safe(packet
, tmp_pkt
, &discard_q
, list
) {
777 packet
->status
= -ECANCELED
;
778 list_del(&packet
->list
);
779 ath6kl_dbg(ATH6KL_DBG_TRC
,
780 "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
781 packet
, packet
->act_len
,
782 packet
->endpoint
, packet
->info
.tx
.tag
);
784 INIT_LIST_HEAD(&container
);
785 list_add_tail(&packet
->list
, &container
);
786 htc_tx_complete(endpoint
, &container
);
791 static void htc_flush_txep_all(struct htc_target
*target
)
793 struct htc_endpoint
*endpoint
;
796 dump_cred_dist_stats(target
);
798 for (i
= ENDPOINT_0
; i
< ENDPOINT_MAX
; i
++) {
799 endpoint
= &target
->endpoint
[i
];
800 if (endpoint
->svc_id
== 0)
803 htc_flush_txep(target
, i
, HTC_TX_PACKET_TAG_ALL
);
807 void htc_indicate_activity_change(struct htc_target
*target
,
808 enum htc_endpoint_id eid
, bool active
)
810 struct htc_endpoint
*endpoint
= &target
->endpoint
[eid
];
813 if (endpoint
->svc_id
== 0) {
818 spin_lock_bh(&target
->tx_lock
);
821 if (!(endpoint
->cred_dist
.dist_flags
& HTC_EP_ACTIVE
)) {
822 endpoint
->cred_dist
.dist_flags
|= HTC_EP_ACTIVE
;
826 if (endpoint
->cred_dist
.dist_flags
& HTC_EP_ACTIVE
) {
827 endpoint
->cred_dist
.dist_flags
&= ~HTC_EP_ACTIVE
;
833 endpoint
->cred_dist
.txq_depth
=
834 get_queue_depth(&endpoint
->txq
);
836 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "ctxt:0x%p dist:0x%p\n",
837 target
->cred_dist_cntxt
, &target
->cred_dist_list
);
839 ath6k_credit_distribute(target
->cred_dist_cntxt
,
840 &target
->cred_dist_list
,
841 HTC_CREDIT_DIST_ACTIVITY_CHANGE
);
844 spin_unlock_bh(&target
->tx_lock
);
847 htc_chk_ep_txq(target
);
852 static inline void htc_update_rx_stats(struct htc_endpoint
*endpoint
,
855 endpoint
->ep_st
.rx_pkts
++;
856 if (n_look_ahds
== 1)
857 endpoint
->ep_st
.rx_lkahds
++;
858 else if (n_look_ahds
> 1)
859 endpoint
->ep_st
.rx_bundle_lkahd
++;
862 static inline bool htc_valid_rx_frame_len(struct htc_target
*target
,
863 enum htc_endpoint_id eid
, int len
)
865 return (eid
== target
->dev
->ar
->ctrl_ep
) ?
866 len
<= ATH6KL_BUFFER_SIZE
: len
<= ATH6KL_AMSDU_BUFFER_SIZE
;
869 static int htc_add_rxbuf(struct htc_target
*target
, struct htc_packet
*packet
)
871 struct list_head queue
;
873 INIT_LIST_HEAD(&queue
);
874 list_add_tail(&packet
->list
, &queue
);
875 return htc_add_rxbuf_multiple(target
, &queue
);
878 static void htc_reclaim_rxbuf(struct htc_target
*target
,
879 struct htc_packet
*packet
,
880 struct htc_endpoint
*ep
)
882 if (packet
->info
.rx
.rx_flags
& HTC_RX_PKT_NO_RECYCLE
) {
883 htc_rxpkt_reset(packet
);
884 packet
->status
= -ECANCELED
;
885 ep
->ep_cb
.rx(ep
->target
, packet
);
887 htc_rxpkt_reset(packet
);
888 htc_add_rxbuf((void *)(target
), packet
);
892 static void reclaim_rx_ctrl_buf(struct htc_target
*target
,
893 struct htc_packet
*packet
)
895 spin_lock_bh(&target
->htc_lock
);
896 list_add_tail(&packet
->list
, &target
->free_ctrl_rxbuf
);
897 spin_unlock_bh(&target
->htc_lock
);
900 static int dev_rx_pkt(struct htc_target
*target
, struct htc_packet
*packet
,
903 struct ath6kl_device
*dev
= target
->dev
;
907 padded_len
= CALC_TXRX_PADDED_LEN(dev
, rx_len
);
909 if (padded_len
> packet
->buf_len
) {
910 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
911 padded_len
, rx_len
, packet
->buf_len
);
915 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
916 "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
917 packet
, packet
->info
.rx
.exp_hdr
,
918 padded_len
, dev
->ar
->mbox_info
.htc_addr
, "sync");
920 status
= hif_read_write_sync(dev
->ar
,
921 dev
->ar
->mbox_info
.htc_addr
,
922 packet
->buf
, padded_len
,
923 HIF_RD_SYNC_BLOCK_FIX
);
925 packet
->status
= status
;
931 * optimization for recv packets, we can indicate a
932 * "hint" that there are more single-packets to fetch
935 static void set_rxpkt_indication_flag(u32 lk_ahd
,
936 struct htc_endpoint
*endpoint
,
937 struct htc_packet
*packet
)
939 struct htc_frame_hdr
*htc_hdr
= (struct htc_frame_hdr
*)&lk_ahd
;
941 if (htc_hdr
->eid
== packet
->endpoint
) {
942 if (!list_empty(&endpoint
->rx_bufq
))
943 packet
->info
.rx
.indicat_flags
|=
944 HTC_RX_FLAGS_INDICATE_MORE_PKTS
;
948 static void chk_rx_water_mark(struct htc_endpoint
*endpoint
)
950 struct htc_ep_callbacks ep_cb
= endpoint
->ep_cb
;
952 if (ep_cb
.rx_refill_thresh
> 0) {
953 spin_lock_bh(&endpoint
->target
->rx_lock
);
954 if (get_queue_depth(&endpoint
->rx_bufq
)
955 < ep_cb
.rx_refill_thresh
) {
956 spin_unlock_bh(&endpoint
->target
->rx_lock
);
957 ep_cb
.rx_refill(endpoint
->target
, endpoint
->eid
);
960 spin_unlock_bh(&endpoint
->target
->rx_lock
);
964 /* This function is called with rx_lock held */
965 static int htc_setup_rxpkts(struct htc_target
*target
, struct htc_endpoint
*ep
,
966 u32
*lk_ahds
, struct list_head
*queue
, int n_msg
)
968 struct htc_packet
*packet
;
969 /* FIXME: type of lk_ahds can't be right */
970 struct htc_frame_hdr
*htc_hdr
= (struct htc_frame_hdr
*)lk_ahds
;
971 struct htc_ep_callbacks ep_cb
;
972 int status
= 0, j
, full_len
;
975 full_len
= CALC_TXRX_PADDED_LEN(target
->dev
,
976 le16_to_cpu(htc_hdr
->payld_len
) +
979 if (!htc_valid_rx_frame_len(target
, ep
->eid
, full_len
)) {
980 ath6kl_warn("Rx buffer requested with invalid length\n");
985 for (j
= 0; j
< n_msg
; j
++) {
988 * Reset flag, any packets allocated using the
989 * rx_alloc() API cannot be recycled on
990 * cleanup,they must be explicitly returned.
994 if (ep_cb
.rx_allocthresh
&&
995 (full_len
> ep_cb
.rx_alloc_thresh
)) {
996 ep
->ep_st
.rx_alloc_thresh_hit
+= 1;
997 ep
->ep_st
.rxalloc_thresh_byte
+=
998 le16_to_cpu(htc_hdr
->payld_len
);
1000 spin_unlock_bh(&target
->rx_lock
);
1003 packet
= ep_cb
.rx_allocthresh(ep
->target
, ep
->eid
,
1005 spin_lock_bh(&target
->rx_lock
);
1007 /* refill handler is being used */
1008 if (list_empty(&ep
->rx_bufq
)) {
1009 if (ep_cb
.rx_refill
) {
1010 spin_unlock_bh(&target
->rx_lock
);
1011 ep_cb
.rx_refill(ep
->target
, ep
->eid
);
1012 spin_lock_bh(&target
->rx_lock
);
1016 if (list_empty(&ep
->rx_bufq
))
1019 packet
= list_first_entry(&ep
->rx_bufq
,
1020 struct htc_packet
, list
);
1021 list_del(&packet
->list
);
1026 target
->rx_st_flags
|= HTC_RECV_WAIT_BUFFERS
;
1027 target
->ep_waiting
= ep
->eid
;
1032 packet
->info
.rx
.rx_flags
= 0;
1033 packet
->info
.rx
.indicat_flags
= 0;
1038 * flag that these packets cannot be
1039 * recycled, they have to be returned to
1042 packet
->info
.rx
.rx_flags
|= HTC_RX_PKT_NO_RECYCLE
;
1044 /* Caller needs to free this upon any failure */
1045 list_add_tail(&packet
->list
, queue
);
1047 if (target
->htc_flags
& HTC_OP_STATE_STOPPING
) {
1048 status
= -ECANCELED
;
1053 packet
->info
.rx
.rx_flags
|= HTC_RX_PKT_REFRESH_HDR
;
1054 packet
->info
.rx
.exp_hdr
= 0xFFFFFFFF;
1056 /* set expected look ahead */
1057 packet
->info
.rx
.exp_hdr
= *lk_ahds
;
1059 packet
->act_len
= le16_to_cpu(htc_hdr
->payld_len
) +
1066 static int alloc_and_prep_rxpkts(struct htc_target
*target
,
1067 u32 lk_ahds
[], int msg
,
1068 struct htc_endpoint
*endpoint
,
1069 struct list_head
*queue
)
1072 struct htc_packet
*packet
, *tmp_pkt
;
1073 struct htc_frame_hdr
*htc_hdr
;
1076 spin_lock_bh(&target
->rx_lock
);
1078 for (i
= 0; i
< msg
; i
++) {
1080 htc_hdr
= (struct htc_frame_hdr
*)&lk_ahds
[i
];
1082 if (htc_hdr
->eid
>= ENDPOINT_MAX
) {
1083 ath6kl_err("invalid ep in look-ahead: %d\n",
1089 if (htc_hdr
->eid
!= endpoint
->eid
) {
1090 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1091 htc_hdr
->eid
, endpoint
->eid
, i
);
1096 if (le16_to_cpu(htc_hdr
->payld_len
) > HTC_MAX_PAYLOAD_LENGTH
) {
1097 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1099 (u32
) HTC_MAX_PAYLOAD_LENGTH
);
1104 if (endpoint
->svc_id
== 0) {
1105 ath6kl_err("ep %d is not connected !\n", htc_hdr
->eid
);
1110 if (htc_hdr
->flags
& HTC_FLG_RX_BNDL_CNT
) {
1112 * HTC header indicates that every packet to follow
1113 * has the same padded length so that it can be
1114 * optimally fetched as a full bundle.
1116 n_msg
= (htc_hdr
->flags
& HTC_FLG_RX_BNDL_CNT
) >>
1117 HTC_FLG_RX_BNDL_CNT_S
;
1119 /* the count doesn't include the starter frame */
1121 if (n_msg
> target
->msg_per_bndl_max
) {
1126 endpoint
->ep_st
.rx_bundle_from_hdr
+= 1;
1127 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1128 "htc hdr indicates :%d msg can be fetched as a bundle\n",
1131 /* HTC header only indicates 1 message to fetch */
1134 /* Setup packet buffers for each message */
1135 status
= htc_setup_rxpkts(target
, endpoint
, &lk_ahds
[i
], queue
,
1139 * This is due to unavailabilty of buffers to rx entire data.
1140 * Return no error so that free buffers from queue can be used
1141 * to receive partial data.
1143 if (status
== -ENOSPC
) {
1144 spin_unlock_bh(&target
->rx_lock
);
1152 spin_unlock_bh(&target
->rx_lock
);
1155 list_for_each_entry_safe(packet
, tmp_pkt
, queue
, list
) {
1156 list_del(&packet
->list
);
1157 htc_reclaim_rxbuf(target
, packet
,
1158 &target
->endpoint
[packet
->endpoint
]);
1165 static void htc_ctrl_rx(struct htc_target
*context
, struct htc_packet
*packets
)
1167 if (packets
->endpoint
!= ENDPOINT_0
) {
1172 if (packets
->status
== -ECANCELED
) {
1173 reclaim_rx_ctrl_buf(context
, packets
);
1177 if (packets
->act_len
> 0) {
1178 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1179 packets
->act_len
+ HTC_HDR_LENGTH
);
1181 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
,
1182 "Unexpected ENDPOINT 0 Message",
1183 packets
->buf
- HTC_HDR_LENGTH
,
1184 packets
->act_len
+ HTC_HDR_LENGTH
);
1187 htc_reclaim_rxbuf(context
, packets
, &context
->endpoint
[0]);
1190 static void htc_proc_cred_rpt(struct htc_target
*target
,
1191 struct htc_credit_report
*rpt
,
1193 enum htc_endpoint_id from_ep
)
1195 struct htc_endpoint
*endpoint
;
1196 int tot_credits
= 0, i
;
1199 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
1200 "htc_proc_cred_rpt, credit report entries:%d\n", n_entries
);
1202 spin_lock_bh(&target
->tx_lock
);
1204 for (i
= 0; i
< n_entries
; i
++, rpt
++) {
1205 if (rpt
->eid
>= ENDPOINT_MAX
) {
1207 spin_unlock_bh(&target
->tx_lock
);
1211 endpoint
= &target
->endpoint
[rpt
->eid
];
1213 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, " ep %d got %d credits\n",
1214 rpt
->eid
, rpt
->credits
);
1216 endpoint
->ep_st
.tx_cred_rpt
+= 1;
1217 endpoint
->ep_st
.cred_retnd
+= rpt
->credits
;
1219 if (from_ep
== rpt
->eid
) {
1221 * This credit report arrived on the same endpoint
1222 * indicating it arrived in an RX packet.
1224 endpoint
->ep_st
.cred_from_rx
+= rpt
->credits
;
1225 endpoint
->ep_st
.cred_rpt_from_rx
+= 1;
1226 } else if (from_ep
== ENDPOINT_0
) {
1227 /* credit arrived on endpoint 0 as a NULL message */
1228 endpoint
->ep_st
.cred_from_ep0
+= rpt
->credits
;
1229 endpoint
->ep_st
.cred_rpt_ep0
+= 1;
1231 endpoint
->ep_st
.cred_from_other
+= rpt
->credits
;
1232 endpoint
->ep_st
.cred_rpt_from_other
+= 1;
1235 if (ENDPOINT_0
== rpt
->eid
)
1236 /* always give endpoint 0 credits back */
1237 endpoint
->cred_dist
.credits
+= rpt
->credits
;
1239 endpoint
->cred_dist
.cred_to_dist
+= rpt
->credits
;
1244 * Refresh tx depth for distribution function that will
1245 * recover these credits NOTE: this is only valid when
1246 * there are credits to recover!
1248 endpoint
->cred_dist
.txq_depth
=
1249 get_queue_depth(&endpoint
->txq
);
1251 tot_credits
+= rpt
->credits
;
1254 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
,
1255 "report indicated %d credits to distribute\n",
1260 * This was a credit return based on a completed send
1261 * operations note, this is done with the lock held
1263 ath6kl_dbg(ATH6KL_DBG_HTC_SEND
, "ctxt:0x%p dist:0x%p\n",
1264 target
->cred_dist_cntxt
, &target
->cred_dist_list
);
1266 ath6k_credit_distribute(target
->cred_dist_cntxt
,
1267 &target
->cred_dist_list
,
1268 HTC_CREDIT_DIST_SEND_COMPLETE
);
1271 spin_unlock_bh(&target
->tx_lock
);
1274 htc_chk_ep_txq(target
);
1277 static int htc_parse_trailer(struct htc_target
*target
,
1278 struct htc_record_hdr
*record
,
1279 u8
*record_buf
, u32
*next_lk_ahds
,
1280 enum htc_endpoint_id endpoint
,
1283 struct htc_bundle_lkahd_rpt
*bundle_lkahd_rpt
;
1284 struct htc_lookahead_report
*lk_ahd
;
1287 switch (record
->rec_id
) {
1288 case HTC_RECORD_CREDITS
:
1289 len
= record
->len
/ sizeof(struct htc_credit_report
);
1295 htc_proc_cred_rpt(target
,
1296 (struct htc_credit_report
*) record_buf
,
1299 case HTC_RECORD_LOOKAHEAD
:
1300 len
= record
->len
/ sizeof(*lk_ahd
);
1306 lk_ahd
= (struct htc_lookahead_report
*) record_buf
;
1307 if ((lk_ahd
->pre_valid
== ((~lk_ahd
->post_valid
) & 0xFF))
1310 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1311 "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
1312 lk_ahd
->pre_valid
, lk_ahd
->post_valid
);
1314 /* look ahead bytes are valid, copy them over */
1315 memcpy((u8
*)&next_lk_ahds
[0], lk_ahd
->lk_ahd
, 4);
1317 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "Next Look Ahead",
1323 case HTC_RECORD_LOOKAHEAD_BUNDLE
:
1324 len
= record
->len
/ sizeof(*bundle_lkahd_rpt
);
1325 if (!len
|| (len
> HTC_HOST_MAX_MSG_PER_BUNDLE
)) {
1334 (struct htc_bundle_lkahd_rpt
*) record_buf
;
1336 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "Bundle lk_ahd",
1337 record_buf
, record
->len
);
1339 for (i
= 0; i
< len
; i
++) {
1340 memcpy((u8
*)&next_lk_ahds
[i
],
1341 bundle_lkahd_rpt
->lk_ahd
, 4);
1349 ath6kl_err("unhandled record: id:%d len:%d\n",
1350 record
->rec_id
, record
->len
);
1358 static int htc_proc_trailer(struct htc_target
*target
,
1359 u8
*buf
, int len
, u32
*next_lk_ahds
,
1360 int *n_lk_ahds
, enum htc_endpoint_id endpoint
)
1362 struct htc_record_hdr
*record
;
1368 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
, "+htc_proc_trailer (len:%d)\n", len
);
1370 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "Recv Trailer", buf
, len
);
1378 if (len
< sizeof(struct htc_record_hdr
)) {
1382 /* these are byte aligned structs */
1383 record
= (struct htc_record_hdr
*) buf
;
1384 len
-= sizeof(struct htc_record_hdr
);
1385 buf
+= sizeof(struct htc_record_hdr
);
1387 if (record
->len
> len
) {
1388 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1389 record
->len
, record
->rec_id
, len
);
1395 status
= htc_parse_trailer(target
, record
, record_buf
,
1396 next_lk_ahds
, endpoint
, n_lk_ahds
);
1401 /* advance buffer past this record for next time around */
1406 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "BAD Recv Trailer",
1407 orig_buf
, orig_len
);
1412 static int htc_proc_rxhdr(struct htc_target
*target
,
1413 struct htc_packet
*packet
,
1414 u32
*next_lkahds
, int *n_lkahds
)
1419 struct htc_frame_hdr
*htc_hdr
= (struct htc_frame_hdr
*)packet
->buf
;
1421 if (n_lkahds
!= NULL
)
1424 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "HTC Recv PKT", packet
->buf
,
1428 * NOTE: we cannot assume the alignment of buf, so we use the safe
1429 * macros to retrieve 16 bit fields.
1431 payload_len
= le16_to_cpu(get_unaligned(&htc_hdr
->payld_len
));
1433 memcpy((u8
*)&lk_ahd
, packet
->buf
, sizeof(lk_ahd
));
1435 if (packet
->info
.rx
.rx_flags
& HTC_RX_PKT_REFRESH_HDR
) {
1437 * Refresh the expected header and the actual length as it
1438 * was unknown when this packet was grabbed as part of the
1441 packet
->info
.rx
.exp_hdr
= lk_ahd
;
1442 packet
->act_len
= payload_len
+ HTC_HDR_LENGTH
;
1444 /* validate the actual header that was refreshed */
1445 if (packet
->act_len
> packet
->buf_len
) {
1446 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1447 payload_len
, lk_ahd
);
1449 * Limit this to max buffer just to print out some
1452 packet
->act_len
= min(packet
->act_len
, packet
->buf_len
);
1457 if (packet
->endpoint
!= htc_hdr
->eid
) {
1458 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1459 htc_hdr
->eid
, packet
->endpoint
);
1465 if (lk_ahd
!= packet
->info
.rx
.exp_hdr
) {
1466 ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1467 packet
, packet
->info
.rx
.rx_flags
);
1468 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "Expected Message lk_ahd",
1469 &packet
->info
.rx
.exp_hdr
, 4);
1470 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "Current Frame Header",
1471 (u8
*)&lk_ahd
, sizeof(lk_ahd
));
1476 if (htc_hdr
->flags
& HTC_FLG_RX_TRAILER
) {
1477 if (htc_hdr
->ctrl
[0] < sizeof(struct htc_record_hdr
) ||
1478 htc_hdr
->ctrl
[0] > payload_len
) {
1479 ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1480 payload_len
, htc_hdr
->ctrl
[0]);
1485 if (packet
->info
.rx
.rx_flags
& HTC_RX_PKT_IGNORE_LOOKAHEAD
) {
1490 status
= htc_proc_trailer(target
, packet
->buf
+ HTC_HDR_LENGTH
1491 + payload_len
- htc_hdr
->ctrl
[0],
1492 htc_hdr
->ctrl
[0], next_lkahds
,
1493 n_lkahds
, packet
->endpoint
);
1498 packet
->act_len
-= htc_hdr
->ctrl
[0];
1501 packet
->buf
+= HTC_HDR_LENGTH
;
1502 packet
->act_len
-= HTC_HDR_LENGTH
;
1506 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
, "BAD HTC Recv PKT",
1508 packet
->act_len
< 256 ? packet
->act_len
: 256);
1510 if (packet
->act_len
> 0)
1511 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES
,
1512 "HTC - Application Msg",
1513 packet
->buf
, packet
->act_len
);
1519 static void do_rx_completion(struct htc_endpoint
*endpoint
,
1520 struct htc_packet
*packet
)
1522 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1523 "htc calling ep %d recv callback on packet 0x%p\n",
1524 endpoint
->eid
, packet
);
1525 endpoint
->ep_cb
.rx(endpoint
->target
, packet
);
1528 static int htc_issue_rxpkt_bundle(struct htc_target
*target
,
1529 struct list_head
*rxq
,
1530 struct list_head
*sync_compq
,
1531 int *n_pkt_fetched
, bool part_bundle
)
1533 struct hif_scatter_req
*scat_req
;
1534 struct htc_packet
*packet
;
1535 int rem_space
= target
->dev
->max_rx_bndl_sz
;
1536 int n_scat_pkt
, status
= 0, i
, len
;
1538 n_scat_pkt
= get_queue_depth(rxq
);
1539 n_scat_pkt
= min(n_scat_pkt
, target
->msg_per_bndl_max
);
1541 if ((get_queue_depth(rxq
) - n_scat_pkt
) > 0) {
1543 * We were forced to split this bundle receive operation
1544 * all packets in this partial bundle must have their
1545 * lookaheads ignored.
1550 * This would only happen if the target ignored our max
1553 ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
1554 get_queue_depth(rxq
), n_scat_pkt
);
1559 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1560 "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
1561 get_queue_depth(rxq
), n_scat_pkt
);
1563 scat_req
= hif_scatter_req_get(target
->dev
->ar
);
1565 if (scat_req
== NULL
)
1568 for (i
= 0; i
< n_scat_pkt
; i
++) {
1571 packet
= list_first_entry(rxq
, struct htc_packet
, list
);
1572 list_del(&packet
->list
);
1574 pad_len
= CALC_TXRX_PADDED_LEN(target
->dev
,
1577 if ((rem_space
- pad_len
) < 0) {
1578 list_add(&packet
->list
, rxq
);
1582 rem_space
-= pad_len
;
1584 if (part_bundle
|| (i
< (n_scat_pkt
- 1)))
1586 * Packet 0..n-1 cannot be checked for look-aheads
1587 * since we are fetching a bundle the last packet
1588 * however can have it's lookahead used
1590 packet
->info
.rx
.rx_flags
|=
1591 HTC_RX_PKT_IGNORE_LOOKAHEAD
;
1593 /* NOTE: 1 HTC packet per scatter entry */
1594 scat_req
->scat_list
[i
].buf
= packet
->buf
;
1595 scat_req
->scat_list
[i
].len
= pad_len
;
1597 packet
->info
.rx
.rx_flags
|= HTC_RX_PKT_PART_OF_BUNDLE
;
1599 list_add_tail(&packet
->list
, sync_compq
);
1601 WARN_ON(!scat_req
->scat_list
[i
].len
);
1602 len
+= scat_req
->scat_list
[i
].len
;
1605 scat_req
->len
= len
;
1606 scat_req
->scat_entries
= i
;
1608 status
= ath6kldev_submit_scat_req(target
->dev
, scat_req
, true);
1613 /* free scatter request */
1614 hif_scatter_req_add(target
->dev
->ar
, scat_req
);
1621 static int htc_proc_fetched_rxpkts(struct htc_target
*target
,
1622 struct list_head
*comp_pktq
, u32 lk_ahds
[],
1625 struct htc_packet
*packet
, *tmp_pkt
;
1626 struct htc_endpoint
*ep
;
1629 list_for_each_entry_safe(packet
, tmp_pkt
, comp_pktq
, list
) {
1630 list_del(&packet
->list
);
1631 ep
= &target
->endpoint
[packet
->endpoint
];
1633 /* process header for each of the recv packet */
1634 status
= htc_proc_rxhdr(target
, packet
, lk_ahds
, n_lk_ahd
);
1638 if (list_empty(comp_pktq
)) {
1640 * Last packet's more packet flag is set
1641 * based on the lookahead.
1644 set_rxpkt_indication_flag(lk_ahds
[0],
1648 * Packets in a bundle automatically have
1651 packet
->info
.rx
.indicat_flags
|=
1652 HTC_RX_FLAGS_INDICATE_MORE_PKTS
;
1654 htc_update_rx_stats(ep
, *n_lk_ahd
);
1656 if (packet
->info
.rx
.rx_flags
& HTC_RX_PKT_PART_OF_BUNDLE
)
1657 ep
->ep_st
.rx_bundl
+= 1;
1659 do_rx_completion(ep
, packet
);
1665 static int htc_fetch_rxpkts(struct htc_target
*target
,
1666 struct list_head
*rx_pktq
,
1667 struct list_head
*comp_pktq
)
1670 bool part_bundle
= false;
1673 /* now go fetch the list of HTC packets */
1674 while (!list_empty(rx_pktq
)) {
1677 if (target
->rx_bndl_enable
&& (get_queue_depth(rx_pktq
) > 1)) {
1679 * There are enough packets to attempt a
1680 * bundle transfer and recv bundling is
1683 status
= htc_issue_rxpkt_bundle(target
, rx_pktq
,
1690 if (!list_empty(rx_pktq
))
1694 if (!fetched_pkts
) {
1695 struct htc_packet
*packet
;
1697 packet
= list_first_entry(rx_pktq
, struct htc_packet
,
1700 list_del(&packet
->list
);
1702 /* fully synchronous */
1703 packet
->completion
= NULL
;
1705 if (!list_empty(rx_pktq
))
1707 * look_aheads in all packet
1708 * except the last one in the
1709 * bundle must be ignored
1711 packet
->info
.rx
.rx_flags
|=
1712 HTC_RX_PKT_IGNORE_LOOKAHEAD
;
1714 /* go fetch the packet */
1715 status
= dev_rx_pkt(target
, packet
, packet
->act_len
);
1719 list_add_tail(&packet
->list
, comp_pktq
);
1726 int htc_rxmsg_pending_handler(struct htc_target
*target
, u32 msg_look_ahead
[],
1729 struct htc_packet
*packets
, *tmp_pkt
;
1730 struct htc_endpoint
*endpoint
;
1731 struct list_head rx_pktq
, comp_pktq
;
1733 u32 look_aheads
[HTC_HOST_MAX_MSG_PER_BUNDLE
];
1734 int num_look_ahead
= 1;
1735 enum htc_endpoint_id id
;
1741 * On first entry copy the look_aheads into our temp array for
1744 memcpy(look_aheads
, msg_look_ahead
, sizeof(look_aheads
));
1749 * First lookahead sets the expected endpoint IDs for all
1750 * packets in a bundle.
1752 id
= ((struct htc_frame_hdr
*)&look_aheads
[0])->eid
;
1753 endpoint
= &target
->endpoint
[id
];
1755 if (id
>= ENDPOINT_MAX
) {
1756 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1762 INIT_LIST_HEAD(&rx_pktq
);
1763 INIT_LIST_HEAD(&comp_pktq
);
1766 * Try to allocate as many HTC RX packets indicated by the
1769 status
= alloc_and_prep_rxpkts(target
, look_aheads
,
1770 num_look_ahead
, endpoint
,
1775 if (get_queue_depth(&rx_pktq
) >= 2)
1777 * A recv bundle was detected, force IRQ status
1780 target
->dev
->chk_irq_status_cnt
= 1;
1782 n_fetched
+= get_queue_depth(&rx_pktq
);
1786 status
= htc_fetch_rxpkts(target
, &rx_pktq
, &comp_pktq
);
1789 chk_rx_water_mark(endpoint
);
1791 /* Process fetched packets */
1792 status
= htc_proc_fetched_rxpkts(target
, &comp_pktq
,
1793 look_aheads
, &num_look_ahead
);
1795 if (!num_look_ahead
|| status
)
1799 * For SYNCH processing, if we get here, we are running
1800 * through the loop again due to a detected lookahead. Set
1801 * flag that we should re-check IRQ status registers again
1802 * before leaving IRQ processing, this can net better
1803 * performance in high throughput situations.
1805 target
->dev
->chk_irq_status_cnt
= 1;
1809 ath6kl_err("failed to get pending recv messages: %d\n",
1812 * Cleanup any packets we allocated but didn't use to
1813 * actually fetch any packets.
1815 list_for_each_entry_safe(packets
, tmp_pkt
, &rx_pktq
, list
) {
1816 list_del(&packets
->list
);
1817 htc_reclaim_rxbuf(target
, packets
,
1818 &target
->endpoint
[packets
->endpoint
]);
1821 /* cleanup any packets in sync completion queue */
1822 list_for_each_entry_safe(packets
, tmp_pkt
, &comp_pktq
, list
) {
1823 list_del(&packets
->list
);
1824 htc_reclaim_rxbuf(target
, packets
,
1825 &target
->endpoint
[packets
->endpoint
]);
1828 if (target
->htc_flags
& HTC_OP_STATE_STOPPING
) {
1829 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1830 ath6kldev_rx_control(target
->dev
, false);
1835 * Before leaving, check to see if host ran out of buffers and
1836 * needs to stop the receiver.
1838 if (target
->rx_st_flags
& HTC_RECV_WAIT_BUFFERS
) {
1839 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1840 ath6kldev_rx_control(target
->dev
, false);
1842 *num_pkts
= n_fetched
;
1848 * Synchronously wait for a control message from the target,
1849 * This function is used at initialization time ONLY. At init messages
1850 * on ENDPOINT 0 are expected.
1852 static struct htc_packet
*htc_wait_for_ctrl_msg(struct htc_target
*target
)
1854 struct htc_packet
*packet
= NULL
;
1855 struct htc_frame_hdr
*htc_hdr
;
1858 if (ath6kldev_poll_mboxmsg_rx(target
->dev
, &look_ahead
,
1859 HTC_TARGET_RESPONSE_TIMEOUT
))
1862 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1863 "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead
);
1865 htc_hdr
= (struct htc_frame_hdr
*)&look_ahead
;
1867 if (htc_hdr
->eid
!= ENDPOINT_0
)
1870 packet
= htc_get_control_buf(target
, false);
1875 packet
->info
.rx
.rx_flags
= 0;
1876 packet
->info
.rx
.exp_hdr
= look_ahead
;
1877 packet
->act_len
= le16_to_cpu(htc_hdr
->payld_len
) + HTC_HDR_LENGTH
;
1879 if (packet
->act_len
> packet
->buf_len
)
1882 /* we want synchronous operation */
1883 packet
->completion
= NULL
;
1885 /* get the message from the device, this will block */
1886 if (dev_rx_pkt(target
, packet
, packet
->act_len
))
1889 /* process receive header */
1890 packet
->status
= htc_proc_rxhdr(target
, packet
, NULL
, NULL
);
1892 if (packet
->status
) {
1893 ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
1901 if (packet
!= NULL
) {
1902 htc_rxpkt_reset(packet
);
1903 reclaim_rx_ctrl_buf(target
, packet
);
1909 int htc_add_rxbuf_multiple(struct htc_target
*target
,
1910 struct list_head
*pkt_queue
)
1912 struct htc_endpoint
*endpoint
;
1913 struct htc_packet
*first_pkt
;
1914 bool rx_unblock
= false;
1915 int status
= 0, depth
;
1917 if (list_empty(pkt_queue
))
1920 first_pkt
= list_first_entry(pkt_queue
, struct htc_packet
, list
);
1922 if (first_pkt
->endpoint
>= ENDPOINT_MAX
)
1925 depth
= get_queue_depth(pkt_queue
);
1927 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1928 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
1929 first_pkt
->endpoint
, depth
, first_pkt
->buf_len
);
1931 endpoint
= &target
->endpoint
[first_pkt
->endpoint
];
1933 if (target
->htc_flags
& HTC_OP_STATE_STOPPING
) {
1934 struct htc_packet
*packet
, *tmp_pkt
;
1936 /* walk through queue and mark each one canceled */
1937 list_for_each_entry_safe(packet
, tmp_pkt
, pkt_queue
, list
) {
1938 packet
->status
= -ECANCELED
;
1939 list_del(&packet
->list
);
1940 do_rx_completion(endpoint
, packet
);
1946 spin_lock_bh(&target
->rx_lock
);
1948 list_splice_tail_init(pkt_queue
, &endpoint
->rx_bufq
);
1950 /* check if we are blocked waiting for a new buffer */
1951 if (target
->rx_st_flags
& HTC_RECV_WAIT_BUFFERS
) {
1952 if (target
->ep_waiting
== first_pkt
->endpoint
) {
1953 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1954 "receiver was blocked on ep:%d, unblocking.\n",
1955 target
->ep_waiting
);
1956 target
->rx_st_flags
&= ~HTC_RECV_WAIT_BUFFERS
;
1957 target
->ep_waiting
= ENDPOINT_MAX
;
1962 spin_unlock_bh(&target
->rx_lock
);
1964 if (rx_unblock
&& !(target
->htc_flags
& HTC_OP_STATE_STOPPING
))
1965 /* TODO : implement a buffer threshold count? */
1966 ath6kldev_rx_control(target
->dev
, true);
1971 void htc_flush_rx_buf(struct htc_target
*target
)
1973 struct htc_endpoint
*endpoint
;
1974 struct htc_packet
*packet
, *tmp_pkt
;
1977 for (i
= ENDPOINT_0
; i
< ENDPOINT_MAX
; i
++) {
1978 endpoint
= &target
->endpoint
[i
];
1979 if (!endpoint
->svc_id
)
1983 spin_lock_bh(&target
->rx_lock
);
1984 list_for_each_entry_safe(packet
, tmp_pkt
,
1985 &endpoint
->rx_bufq
, list
) {
1986 list_del(&packet
->list
);
1987 spin_unlock_bh(&target
->rx_lock
);
1988 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
1989 "flushing rx pkt:0x%p, len:%d, ep:%d\n",
1990 packet
, packet
->buf_len
,
1992 dev_kfree_skb(packet
->pkt_cntxt
);
1993 spin_lock_bh(&target
->rx_lock
);
1995 spin_unlock_bh(&target
->rx_lock
);
1999 int htc_conn_service(struct htc_target
*target
,
2000 struct htc_service_connect_req
*conn_req
,
2001 struct htc_service_connect_resp
*conn_resp
)
2003 struct htc_packet
*rx_pkt
= NULL
;
2004 struct htc_packet
*tx_pkt
= NULL
;
2005 struct htc_conn_service_resp
*resp_msg
;
2006 struct htc_conn_service_msg
*conn_msg
;
2007 struct htc_endpoint
*endpoint
;
2008 enum htc_endpoint_id assigned_ep
= ENDPOINT_MAX
;
2009 unsigned int max_msg_sz
= 0;
2012 ath6kl_dbg(ATH6KL_DBG_TRC
,
2013 "htc_conn_service, target:0x%p service id:0x%X\n",
2014 target
, conn_req
->svc_id
);
2016 if (conn_req
->svc_id
== HTC_CTRL_RSVD_SVC
) {
2017 /* special case for pseudo control service */
2018 assigned_ep
= ENDPOINT_0
;
2019 max_msg_sz
= HTC_MAX_CTRL_MSG_LEN
;
2021 /* allocate a packet to send to the target */
2022 tx_pkt
= htc_get_control_buf(target
, true);
2027 conn_msg
= (struct htc_conn_service_msg
*)tx_pkt
->buf
;
2028 memset(conn_msg
, 0, sizeof(*conn_msg
));
2029 conn_msg
->msg_id
= cpu_to_le16(HTC_MSG_CONN_SVC_ID
);
2030 conn_msg
->svc_id
= cpu_to_le16(conn_req
->svc_id
);
2031 conn_msg
->conn_flags
= cpu_to_le16(conn_req
->conn_flags
);
2033 set_htc_pkt_info(tx_pkt
, NULL
, (u8
*) conn_msg
,
2034 sizeof(*conn_msg
) + conn_msg
->svc_meta_len
,
2035 ENDPOINT_0
, HTC_SERVICE_TX_PACKET_TAG
);
2037 /* we want synchronous operation */
2038 tx_pkt
->completion
= NULL
;
2039 htc_prep_send_pkt(tx_pkt
, 0, 0, 0);
2040 status
= htc_issue_send(target
, tx_pkt
);
2045 /* wait for response */
2046 rx_pkt
= htc_wait_for_ctrl_msg(target
);
2053 resp_msg
= (struct htc_conn_service_resp
*)rx_pkt
->buf
;
2055 if ((le16_to_cpu(resp_msg
->msg_id
) != HTC_MSG_CONN_SVC_RESP_ID
)
2056 || (rx_pkt
->act_len
< sizeof(*resp_msg
))) {
2061 conn_resp
->resp_code
= resp_msg
->status
;
2062 /* check response status */
2063 if (resp_msg
->status
!= HTC_SERVICE_SUCCESS
) {
2064 ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2065 resp_msg
->svc_id
, resp_msg
->status
);
2070 assigned_ep
= (enum htc_endpoint_id
)resp_msg
->eid
;
2071 max_msg_sz
= le16_to_cpu(resp_msg
->max_msg_sz
);
2074 if (assigned_ep
>= ENDPOINT_MAX
|| !max_msg_sz
) {
2079 endpoint
= &target
->endpoint
[assigned_ep
];
2080 endpoint
->eid
= assigned_ep
;
2081 if (endpoint
->svc_id
) {
2086 /* return assigned endpoint to caller */
2087 conn_resp
->endpoint
= assigned_ep
;
2088 conn_resp
->len_max
= max_msg_sz
;
2090 /* setup the endpoint */
2092 /* this marks the endpoint in use */
2093 endpoint
->svc_id
= conn_req
->svc_id
;
2095 endpoint
->max_txq_depth
= conn_req
->max_txq_depth
;
2096 endpoint
->len_max
= max_msg_sz
;
2097 endpoint
->ep_cb
= conn_req
->ep_cb
;
2098 endpoint
->cred_dist
.svc_id
= conn_req
->svc_id
;
2099 endpoint
->cred_dist
.htc_rsvd
= endpoint
;
2100 endpoint
->cred_dist
.endpoint
= assigned_ep
;
2101 endpoint
->cred_dist
.cred_sz
= target
->tgt_cred_sz
;
2103 if (conn_req
->max_rxmsg_sz
) {
2105 * Override cred_per_msg calculation, this optimizes
2106 * the credit-low indications since the host will actually
2107 * issue smaller messages in the Send path.
2109 if (conn_req
->max_rxmsg_sz
> max_msg_sz
) {
2113 endpoint
->cred_dist
.cred_per_msg
=
2114 conn_req
->max_rxmsg_sz
/ target
->tgt_cred_sz
;
2116 endpoint
->cred_dist
.cred_per_msg
=
2117 max_msg_sz
/ target
->tgt_cred_sz
;
2119 if (!endpoint
->cred_dist
.cred_per_msg
)
2120 endpoint
->cred_dist
.cred_per_msg
= 1;
2122 /* save local connection flags */
2123 endpoint
->conn_flags
= conn_req
->flags
;
2127 htc_reclaim_txctrl_buf(target
, tx_pkt
);
2130 htc_rxpkt_reset(rx_pkt
);
2131 reclaim_rx_ctrl_buf(target
, rx_pkt
);
2137 static void reset_ep_state(struct htc_target
*target
)
2139 struct htc_endpoint
*endpoint
;
2142 for (i
= ENDPOINT_0
; i
< ENDPOINT_MAX
; i
++) {
2143 endpoint
= &target
->endpoint
[i
];
2144 memset(&endpoint
->cred_dist
, 0, sizeof(endpoint
->cred_dist
));
2145 endpoint
->svc_id
= 0;
2146 endpoint
->len_max
= 0;
2147 endpoint
->max_txq_depth
= 0;
2148 memset(&endpoint
->ep_st
, 0,
2149 sizeof(endpoint
->ep_st
));
2150 INIT_LIST_HEAD(&endpoint
->rx_bufq
);
2151 INIT_LIST_HEAD(&endpoint
->txq
);
2152 endpoint
->target
= target
;
2155 /* reset distribution list */
2156 INIT_LIST_HEAD(&target
->cred_dist_list
);
2159 int htc_get_rxbuf_num(struct htc_target
*target
, enum htc_endpoint_id endpoint
)
2163 spin_lock_bh(&target
->rx_lock
);
2164 num
= get_queue_depth(&(target
->endpoint
[endpoint
].rx_bufq
));
2165 spin_unlock_bh(&target
->rx_lock
);
2169 static void htc_setup_msg_bndl(struct htc_target
*target
)
2171 struct hif_dev_scat_sup_info
*scat_info
= &target
->dev
->hif_scat_info
;
2173 /* limit what HTC can handle */
2174 target
->msg_per_bndl_max
= min(HTC_HOST_MAX_MSG_PER_BUNDLE
,
2175 target
->msg_per_bndl_max
);
2177 if (ath6kl_hif_enable_scatter(target
->dev
->ar
, scat_info
)) {
2178 target
->msg_per_bndl_max
= 0;
2182 /* limit bundle what the device layer can handle */
2183 target
->msg_per_bndl_max
= min(scat_info
->max_scat_entries
,
2184 target
->msg_per_bndl_max
);
2186 ath6kl_dbg(ATH6KL_DBG_TRC
,
2187 "htc bundling allowed. max msg per htc bundle: %d\n",
2188 target
->msg_per_bndl_max
);
2190 /* Max rx bundle size is limited by the max tx bundle size */
2191 target
->dev
->max_rx_bndl_sz
= scat_info
->max_xfer_szper_scatreq
;
2192 /* Max tx bundle size if limited by the extended mbox address range */
2193 target
->dev
->max_tx_bndl_sz
= min(HIF_MBOX0_EXT_WIDTH
,
2194 scat_info
->max_xfer_szper_scatreq
);
2196 ath6kl_dbg(ATH6KL_DBG_ANY
, "max recv: %d max send: %d\n",
2197 target
->dev
->max_rx_bndl_sz
, target
->dev
->max_tx_bndl_sz
);
2199 if (target
->dev
->max_tx_bndl_sz
)
2200 target
->tx_bndl_enable
= true;
2202 if (target
->dev
->max_rx_bndl_sz
)
2203 target
->rx_bndl_enable
= true;
2205 if ((target
->tgt_cred_sz
% target
->dev
->block_sz
) != 0) {
2206 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2207 target
->tgt_cred_sz
);
2210 * Disallow send bundling since the credit size is
2211 * not aligned to a block size the I/O block
2212 * padding will spill into the next credit buffer
2215 target
->tx_bndl_enable
= false;
2219 int htc_wait_target(struct htc_target
*target
)
2221 struct htc_packet
*packet
= NULL
;
2222 struct htc_ready_ext_msg
*rdy_msg
;
2223 struct htc_service_connect_req connect
;
2224 struct htc_service_connect_resp resp
;
2227 /* we should be getting 1 control message that the target is ready */
2228 packet
= htc_wait_for_ctrl_msg(target
);
2233 /* we controlled the buffer creation so it's properly aligned */
2234 rdy_msg
= (struct htc_ready_ext_msg
*)packet
->buf
;
2236 if ((le16_to_cpu(rdy_msg
->ver2_0_info
.msg_id
) != HTC_MSG_READY_ID
) ||
2237 (packet
->act_len
< sizeof(struct htc_ready_msg
))) {
2239 goto fail_wait_target
;
2242 if (!rdy_msg
->ver2_0_info
.cred_cnt
|| !rdy_msg
->ver2_0_info
.cred_sz
) {
2244 goto fail_wait_target
;
2247 target
->tgt_creds
= le16_to_cpu(rdy_msg
->ver2_0_info
.cred_cnt
);
2248 target
->tgt_cred_sz
= le16_to_cpu(rdy_msg
->ver2_0_info
.cred_sz
);
2250 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
2251 "target ready: credits: %d credit size: %d\n",
2252 target
->tgt_creds
, target
->tgt_cred_sz
);
2254 /* check if this is an extended ready message */
2255 if (packet
->act_len
>= sizeof(struct htc_ready_ext_msg
)) {
2256 /* this is an extended message */
2257 target
->htc_tgt_ver
= rdy_msg
->htc_ver
;
2258 target
->msg_per_bndl_max
= rdy_msg
->msg_per_htc_bndl
;
2261 target
->htc_tgt_ver
= HTC_VERSION_2P0
;
2262 target
->msg_per_bndl_max
= 0;
2265 ath6kl_dbg(ATH6KL_DBG_TRC
, "using htc protocol version : %s (%d)\n",
2266 (target
->htc_tgt_ver
== HTC_VERSION_2P0
) ? "2.0" : ">= 2.1",
2267 target
->htc_tgt_ver
);
2269 if (target
->msg_per_bndl_max
> 0)
2270 htc_setup_msg_bndl(target
);
2272 /* setup our pseudo HTC control endpoint connection */
2273 memset(&connect
, 0, sizeof(connect
));
2274 memset(&resp
, 0, sizeof(resp
));
2275 connect
.ep_cb
.rx
= htc_ctrl_rx
;
2276 connect
.ep_cb
.rx_refill
= NULL
;
2277 connect
.ep_cb
.tx_full
= NULL
;
2278 connect
.max_txq_depth
= NUM_CONTROL_BUFFERS
;
2279 connect
.svc_id
= HTC_CTRL_RSVD_SVC
;
2281 /* connect fake service */
2282 status
= htc_conn_service((void *)target
, &connect
, &resp
);
2285 ath6kl_hif_cleanup_scatter(target
->dev
->ar
);
2289 htc_rxpkt_reset(packet
);
2290 reclaim_rx_ctrl_buf(target
, packet
);
2297 * Start HTC, enable interrupts and let the target know
2298 * host has finished setup.
2300 int htc_start(struct htc_target
*target
)
2302 struct htc_packet
*packet
;
2305 /* Disable interrupts at the chip level */
2306 ath6kldev_disable_intrs(target
->dev
);
2308 target
->htc_flags
= 0;
2309 target
->rx_st_flags
= 0;
2311 /* Push control receive buffers into htc control endpoint */
2312 while ((packet
= htc_get_control_buf(target
, false)) != NULL
) {
2313 status
= htc_add_rxbuf(target
, packet
);
2318 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2319 ath6k_credit_init(target
->cred_dist_cntxt
, &target
->cred_dist_list
,
2322 dump_cred_dist_stats(target
);
2324 /* Indicate to the target of the setup completion */
2325 status
= htc_setup_tx_complete(target
);
2330 /* unmask interrupts */
2331 status
= ath6kldev_unmask_intrs(target
->dev
);
2339 /* htc_stop: stop interrupt reception, and flush all queued buffers */
2340 void htc_stop(struct htc_target
*target
)
2342 spin_lock_bh(&target
->htc_lock
);
2343 target
->htc_flags
|= HTC_OP_STATE_STOPPING
;
2344 spin_unlock_bh(&target
->htc_lock
);
2347 * Masking interrupts is a synchronous operation, when this
2348 * function returns all pending HIF I/O has completed, we can
2349 * safely flush the queues.
2351 ath6kldev_mask_intrs(target
->dev
);
2353 htc_flush_txep_all(target
);
2355 htc_flush_rx_buf(target
);
2357 reset_ep_state(target
);
2360 void *htc_create(struct ath6kl
*ar
)
2362 struct htc_target
*target
= NULL
;
2363 struct htc_packet
*packet
;
2364 int status
= 0, i
= 0;
2365 u32 block_size
, ctrl_bufsz
;
2367 target
= kzalloc(sizeof(*target
), GFP_KERNEL
);
2369 ath6kl_err("unable to allocate memory\n");
2373 target
->dev
= kzalloc(sizeof(*target
->dev
), GFP_KERNEL
);
2375 ath6kl_err("unable to allocate memory\n");
2377 goto fail_create_htc
;
2380 spin_lock_init(&target
->htc_lock
);
2381 spin_lock_init(&target
->rx_lock
);
2382 spin_lock_init(&target
->tx_lock
);
2384 INIT_LIST_HEAD(&target
->free_ctrl_txbuf
);
2385 INIT_LIST_HEAD(&target
->free_ctrl_rxbuf
);
2386 INIT_LIST_HEAD(&target
->cred_dist_list
);
2388 target
->dev
->ar
= ar
;
2389 target
->dev
->htc_cnxt
= target
;
2390 target
->ep_waiting
= ENDPOINT_MAX
;
2392 reset_ep_state(target
);
2394 status
= ath6kldev_setup(target
->dev
);
2397 goto fail_create_htc
;
2399 block_size
= ar
->mbox_info
.block_size
;
2401 ctrl_bufsz
= (block_size
> HTC_MAX_CTRL_MSG_LEN
) ?
2402 (block_size
+ HTC_HDR_LENGTH
) :
2403 (HTC_MAX_CTRL_MSG_LEN
+ HTC_HDR_LENGTH
);
2405 for (i
= 0; i
< NUM_CONTROL_BUFFERS
; i
++) {
2406 packet
= kzalloc(sizeof(*packet
), GFP_KERNEL
);
2410 packet
->buf_start
= kzalloc(ctrl_bufsz
, GFP_KERNEL
);
2411 if (!packet
->buf_start
) {
2416 packet
->buf_len
= ctrl_bufsz
;
2417 if (i
< NUM_CONTROL_RX_BUFFERS
) {
2418 packet
->act_len
= 0;
2419 packet
->buf
= packet
->buf_start
;
2420 packet
->endpoint
= ENDPOINT_0
;
2421 list_add_tail(&packet
->list
, &target
->free_ctrl_rxbuf
);
2423 list_add_tail(&packet
->list
, &target
->free_ctrl_txbuf
);
2427 if (i
!= NUM_CONTROL_BUFFERS
|| status
) {
2429 htc_cleanup(target
);
2437 /* cleanup the HTC instance */
2438 void htc_cleanup(struct htc_target
*target
)
2440 struct htc_packet
*packet
, *tmp_packet
;
2442 ath6kl_hif_cleanup_scatter(target
->dev
->ar
);
2444 list_for_each_entry_safe(packet
, tmp_packet
,
2445 &target
->free_ctrl_txbuf
, list
) {
2446 list_del(&packet
->list
);
2447 kfree(packet
->buf_start
);
2451 list_for_each_entry_safe(packet
, tmp_packet
,
2452 &target
->free_ctrl_rxbuf
, list
) {
2453 list_del(&packet
->list
);
2454 kfree(packet
->buf_start
);