ath6kl: fix indentation in htc_issue_send()
[deliverable/linux.git] drivers/net/wireless/ath/ath6kl/htc.c
1 /*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "core.h"
18 #include "htc_hif.h"
19 #include "debug.h"
20 #include "hif-ops.h"
21 #include <asm/unaligned.h>
22
23 #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
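/*
 * CALC_TXRX_PADDED_LEN rounds a transfer length up to the device block
 * boundary: __ALIGN_MASK(len, mask) evaluates to ((len + mask) & ~mask).
 * For example, assuming a typical SDIO block size of 128 bytes
 * (block_mask 0x7f), a 60 byte transfer is padded to 128 bytes and a
 * 130 byte transfer to 256 bytes.
 */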
24
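/*
 * Prepend the HTC frame header to a packet that is about to be sent:
 * the buffer pointer is moved back by HTC_HDR_LENGTH and the payload
 * length, flags, endpoint id and the two per-message control bytes are
 * filled in.
 */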
25 static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
26 int ctrl1)
27 {
28 struct htc_frame_hdr *hdr;
29
30 packet->buf -= HTC_HDR_LENGTH;
31 hdr = (struct htc_frame_hdr *)packet->buf;
32
33 /* Endianness? */
34 put_unaligned((u16)packet->act_len, &hdr->payld_len);
35 hdr->flags = flags;
36 hdr->eid = packet->endpoint;
37 hdr->ctrl[0] = ctrl0;
38 hdr->ctrl[1] = ctrl1;
39 }
40
41 static void htc_reclaim_txctrl_buf(struct htc_target *target,
42 struct htc_packet *pkt)
43 {
44 spin_lock_bh(&target->htc_lock);
45 list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
46 spin_unlock_bh(&target->htc_lock);
47 }
48
49 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
50 bool tx)
51 {
52 struct htc_packet *packet = NULL;
53 struct list_head *buf_list;
54
55 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
56
57 spin_lock_bh(&target->htc_lock);
58
59 if (list_empty(buf_list)) {
60 spin_unlock_bh(&target->htc_lock);
61 return NULL;
62 }
63
64 packet = list_first_entry(buf_list, struct htc_packet, list);
65 list_del(&packet->list);
66 spin_unlock_bh(&target->htc_lock);
67
68 if (tx)
69 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
70
71 return packet;
72 }
73
74 static void htc_tx_comp_update(struct htc_target *target,
75 struct htc_endpoint *endpoint,
76 struct htc_packet *packet)
77 {
78 packet->completion = NULL;
79 packet->buf += HTC_HDR_LENGTH;
80
81 if (!packet->status)
82 return;
83
84 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
85 packet->status, packet->endpoint, packet->act_len,
86 packet->info.tx.cred_used);
87
88 /* on failure to submit, reclaim credits for this packet */
89 spin_lock_bh(&target->tx_lock);
90 endpoint->cred_dist.cred_to_dist +=
91 packet->info.tx.cred_used;
92 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
93
94 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
95 target->cred_dist_cntxt, &target->cred_dist_list);
96
97 ath6k_credit_distribute(target->cred_dist_cntxt,
98 &target->cred_dist_list,
99 HTC_CREDIT_DIST_SEND_COMPLETE);
100
101 spin_unlock_bh(&target->tx_lock);
102 }
103
104 static void htc_tx_complete(struct htc_endpoint *endpoint,
105 struct list_head *txq)
106 {
107 if (list_empty(txq))
108 return;
109
110 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
111 "send complete ep %d, (%d pkts)\n",
112 endpoint->eid, get_queue_depth(txq));
113
114 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
115 }
116
117 static void htc_tx_comp_handler(struct htc_target *target,
118 struct htc_packet *packet)
119 {
120 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
121 struct list_head container;
122
123 htc_tx_comp_update(target, endpoint, packet);
124 INIT_LIST_HEAD(&container);
125 list_add_tail(&packet->list, &container);
126 /* do completion */
127 htc_tx_complete(endpoint, &container);
128 }
129
130 static void htc_async_tx_scat_complete(struct htc_target *target,
131 struct hif_scatter_req *scat_req)
132 {
133 struct htc_endpoint *endpoint;
134 struct htc_packet *packet;
135 struct list_head tx_compq;
136 int i;
137
138 INIT_LIST_HEAD(&tx_compq);
139
140 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
141 "htc_async_tx_scat_complete total len: %d entries: %d\n",
142 scat_req->len, scat_req->scat_entries);
143
144 if (scat_req->status)
145 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
146
147 packet = scat_req->scat_list[0].packet;
148 endpoint = &target->endpoint[packet->endpoint];
149
150 /* walk through the scatter list and process */
151 for (i = 0; i < scat_req->scat_entries; i++) {
152 packet = scat_req->scat_list[i].packet;
153 if (!packet) {
154 WARN_ON(1);
155 return;
156 }
157
158 packet->status = scat_req->status;
159 htc_tx_comp_update(target, endpoint, packet);
160 list_add_tail(&packet->list, &tx_compq);
161 }
162
163 /* free scatter request */
164 hif_scatter_req_add(target->dev->ar, scat_req);
165
166 /* complete all packets */
167 htc_tx_complete(endpoint, &tx_compq);
168 }
169
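/*
 * Push a single, already prepared HTC packet out to the target mailbox.
 * Packets without a completion callback are written synchronously;
 * otherwise the write is issued asynchronously and the result is
 * delivered through the packet's completion path.
 */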
170 static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
171 {
172 int status;
173 bool sync = false;
174 u32 padded_len, send_len;
175
176 if (!packet->completion)
177 sync = true;
178
179 send_len = packet->act_len + HTC_HDR_LENGTH;
180
181 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
182 __func__, send_len, sync ? "sync" : "async");
183
184 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
185
186 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
187 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
188 padded_len,
189 target->dev->ar->mbox_info.htc_addr,
190 sync ? "sync" : "async");
191
192 if (sync) {
193 status = hif_read_write_sync(target->dev->ar,
194 target->dev->ar->mbox_info.htc_addr,
195 packet->buf, padded_len,
196 HIF_WR_SYNC_BLOCK_INC);
197
198 packet->status = status;
199 packet->buf += HTC_HDR_LENGTH;
200 } else
201 status = hif_write_async(target->dev->ar,
202 target->dev->ar->mbox_info.htc_addr,
203 packet->buf, padded_len,
204 HIF_WR_ASYNC_BLOCK_INC, packet);
205
206 return status;
207 }
208
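/*
 * Check whether the endpoint has enough TX credits for a message of
 * 'len' bytes, one credit per target credit-size block (for example,
 * assuming a credit size of 1500 bytes, a 2000 byte message needs two
 * credits). If credits are short, ask the distribution function to seek
 * more; when the pool runs low, flag the message so the target returns
 * a credit update.
 */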
209 static int htc_check_credits(struct htc_target *target,
210 struct htc_endpoint *ep, u8 *flags,
211 enum htc_endpoint_id eid, unsigned int len,
212 int *req_cred)
213 {
214
215 *req_cred = (len > target->tgt_cred_sz) ?
216 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
217
218 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
219 *req_cred, ep->cred_dist.credits);
220
221 if (ep->cred_dist.credits < *req_cred) {
222 if (eid == ENDPOINT_0)
223 return -EINVAL;
224
225 /* Seek more credits */
226 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
227
228 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
229 target->cred_dist_cntxt, &ep->cred_dist);
230
231 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
232
233 ep->cred_dist.seek_cred = 0;
234
235 if (ep->cred_dist.credits < *req_cred) {
236 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
237 "not enough credits for ep %d - leaving packet in queue\n",
238 eid);
239 return -EINVAL;
240 }
241 }
242
243 ep->cred_dist.credits -= *req_cred;
244 ep->ep_st.cred_cosumd += *req_cred;
245
246 /* When we are getting low on credits, ask for more */
247 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
248 ep->cred_dist.seek_cred =
249 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
250
251 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
252 target->cred_dist_cntxt, &ep->cred_dist);
253
254 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
255
256 /* see if we were successful in getting more */
257 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
258 /* tell the target we need credits ASAP! */
259 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
260 ep->ep_st.cred_low_indicate += 1;
261 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
262 }
263 }
264
265 return 0;
266 }
267
268 static void htc_tx_pkts_get(struct htc_target *target,
269 struct htc_endpoint *endpoint,
270 struct list_head *queue)
271 {
272 int req_cred;
273 u8 flags;
274 struct htc_packet *packet;
275 unsigned int len;
276
277 while (true) {
278
279 flags = 0;
280
281 if (list_empty(&endpoint->txq))
282 break;
283 packet = list_first_entry(&endpoint->txq, struct htc_packet,
284 list);
285
286 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
287 "got head pkt:0x%p , queue depth: %d\n",
288 packet, get_queue_depth(&endpoint->txq));
289
290 len = CALC_TXRX_PADDED_LEN(target,
291 packet->act_len + HTC_HDR_LENGTH);
292
293 if (htc_check_credits(target, endpoint, &flags,
294 packet->endpoint, len, &req_cred))
295 break;
296
297 /* now we can fully move onto caller's queue */
298 packet = list_first_entry(&endpoint->txq, struct htc_packet,
299 list);
300 list_move_tail(&packet->list, queue);
301
302 /* save the number of credits this packet consumed */
303 packet->info.tx.cred_used = req_cred;
304
305 /* all TX packets are handled asynchronously */
306 packet->completion = htc_tx_comp_handler;
307 packet->context = target;
308 endpoint->ep_st.tx_issued += 1;
309
310 /* save send flags */
311 packet->info.tx.flags = flags;
312 packet->info.tx.seqno = endpoint->seqno;
313 endpoint->seqno++;
314 }
315 }
316
317 /* See if the padded tx length falls on a credit boundary */
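/*
 * For example, assuming a credit size of 128 bytes and a padded length
 * of 100 bytes, 28 dummy bytes are added so the message consumes the
 * whole credit; if the required padding would exceed 255 bytes the
 * packet is sent unbundled instead.
 */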
318 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
319 struct htc_endpoint *ep)
320 {
321 int rem_cred, cred_pad;
322
323 rem_cred = *len % cred_sz;
324
325 /* No padding needed */
326 if (!rem_cred)
327 return 0;
328
329 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
330 return -1;
331
332 /*
333 * The transfer consumes a "partial" credit; this
334 * packet cannot be bundled unless we add
335 * additional "dummy" padding (max 255 bytes) to
336 * consume the entire credit.
337 */
338 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
339
340 if ((cred_pad > 0) && (cred_pad <= 255))
341 *len += cred_pad;
342 else
343 /* The amount of padding is too large, send as non-bundled */
344 return -1;
345
346 return cred_pad;
347 }
348
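/*
 * Pull packets off 'queue' and build the scatter list for one bundled
 * send: each entry gets an HTC header flagged as part of a bundle plus
 * any credit padding. On error, or if fewer than
 * HTC_MIN_HTC_MSGS_TO_BUNDLE entries were gathered, the packets are put
 * back on the queue and -EINVAL is returned.
 */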
349 static int htc_setup_send_scat_list(struct htc_target *target,
350 struct htc_endpoint *endpoint,
351 struct hif_scatter_req *scat_req,
352 int n_scat,
353 struct list_head *queue)
354 {
355 struct htc_packet *packet;
356 int i, len, rem_scat, cred_pad;
357 int status = 0;
358
359 rem_scat = target->max_tx_bndl_sz;
360
361 for (i = 0; i < n_scat; i++) {
362 scat_req->scat_list[i].packet = NULL;
363
364 if (list_empty(queue))
365 break;
366
367 packet = list_first_entry(queue, struct htc_packet, list);
368 len = CALC_TXRX_PADDED_LEN(target,
369 packet->act_len + HTC_HDR_LENGTH);
370
371 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
372 &len, endpoint);
373 if (cred_pad < 0) {
374 status = -EINVAL;
375 break;
376 }
377
378 if (rem_scat < len) {
379 /* exceeds what we can transfer */
380 status = -ENOSPC;
381 break;
382 }
383
384 rem_scat -= len;
385 /* now remove it from the queue */
386 packet = list_first_entry(queue, struct htc_packet, list);
387 list_del(&packet->list);
388
389 scat_req->scat_list[i].packet = packet;
390 /* prepare packet and flag message as part of a send bundle */
391 htc_prep_send_pkt(packet,
392 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
393 cred_pad, packet->info.tx.seqno);
394 scat_req->scat_list[i].buf = packet->buf;
395 scat_req->scat_list[i].len = len;
396
397 scat_req->len += len;
398 scat_req->scat_entries++;
399 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
400 "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
401 i, packet, len, rem_scat);
402 }
403
404 /* Roll back scatter setup in case of any failure */
405 if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
406 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
407 packet = scat_req->scat_list[i].packet;
408 if (packet) {
409 packet->buf += HTC_HDR_LENGTH;
410 list_add(&packet->list, queue);
411 }
412 }
413 return -EINVAL;
414 }
415
416 return 0;
417 }
418
419 /*
420 * htc_issue_send_bundle: drain a queue and send as bundles.
421 * This function may return without fully draining the
422 * queue when:
423 *
424 * 1. scatter resources are exhausted
425 * 2. a message that will consume a partial credit will stop the
426 * bundling process early
427 * 3. we drop below the minimum number of messages for a bundle
428 */
429 static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
430 struct list_head *queue,
431 int *sent_bundle, int *n_bundle_pkts)
432 {
433 struct htc_target *target = endpoint->target;
434 struct hif_scatter_req *scat_req = NULL;
435 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
436
437 while (true) {
438 n_scat = get_queue_depth(queue);
439 n_scat = min(n_scat, target->msg_per_bndl_max);
440
441 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
442 /* not enough to bundle */
443 break;
444
445 scat_req = hif_scatter_req_get(target->dev->ar);
446
447 if (!scat_req) {
448 /* no scatter resources */
449 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
450 "no more scatter resources\n");
451 break;
452 }
453
454 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
455 n_scat);
456
457 scat_req->len = 0;
458 scat_req->scat_entries = 0;
459
460 if (htc_setup_send_scat_list(target, endpoint, scat_req,
461 n_scat, queue)) {
462 hif_scatter_req_add(target->dev->ar, scat_req);
463 break;
464 }
465
466 /* send path is always asynchronous */
467 scat_req->complete = htc_async_tx_scat_complete;
468 n_sent_bundle++;
469 tot_pkts_bundle += scat_req->scat_entries;
470
471 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
472 "send scatter total bytes: %d , entries: %d\n",
473 scat_req->len, scat_req->scat_entries);
474 ath6kldev_submit_scat_req(target->dev, scat_req, false);
475 }
476
477 *sent_bundle = n_sent_bundle;
478 *n_bundle_pkts = tot_pkts_bundle;
479 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
480 n_sent_bundle);
481
482 return;
483 }
484
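/*
 * Drain the endpoint TX queue: packets are pulled while credits allow,
 * sent as scatter/gather bundles when bundling is enabled and enough
 * messages are queued, and issued individually otherwise. tx_proc_cnt
 * keeps concurrent callers from processing the same queue.
 */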
485 static void htc_tx_from_ep_txq(struct htc_target *target,
486 struct htc_endpoint *endpoint)
487 {
488 struct list_head txq;
489 struct htc_packet *packet;
490 int bundle_sent;
491 int n_pkts_bundle;
492
493 spin_lock_bh(&target->tx_lock);
494
495 endpoint->tx_proc_cnt++;
496 if (endpoint->tx_proc_cnt > 1) {
497 endpoint->tx_proc_cnt--;
498 spin_unlock_bh(&target->tx_lock);
499 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
500 return;
501 }
502
503 /*
504 * drain the endpoint TX queue for transmission as long
505 * as we have enough credits.
506 */
507 INIT_LIST_HEAD(&txq);
508
509 while (true) {
510
511 if (list_empty(&endpoint->txq))
512 break;
513
514 htc_tx_pkts_get(target, endpoint, &txq);
515
516 if (list_empty(&txq))
517 break;
518
519 spin_unlock_bh(&target->tx_lock);
520
521 bundle_sent = 0;
522 n_pkts_bundle = 0;
523
524 while (true) {
525 /* try to send a bundle on each pass */
526 if ((target->tx_bndl_enable) &&
527 (get_queue_depth(&txq) >=
528 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
529 int temp1 = 0, temp2 = 0;
530
531 htc_issue_send_bundle(endpoint, &txq,
532 &temp1, &temp2);
533 bundle_sent += temp1;
534 n_pkts_bundle += temp2;
535 }
536
537 if (list_empty(&txq))
538 break;
539
540 packet = list_first_entry(&txq, struct htc_packet,
541 list);
542 list_del(&packet->list);
543
544 htc_prep_send_pkt(packet, packet->info.tx.flags,
545 0, packet->info.tx.seqno);
546 htc_issue_send(target, packet);
547 }
548
549 spin_lock_bh(&target->tx_lock);
550
551 endpoint->ep_st.tx_bundles += bundle_sent;
552 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
553 }
554
555 endpoint->tx_proc_cnt = 0;
556 spin_unlock_bh(&target->tx_lock);
557 }
558
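/*
 * Queue a packet on its endpoint and kick the TX path. If the queue is
 * already at max_txq_depth, the endpoint's tx_full callback may decide
 * to drop the packet, in which case false is returned.
 */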
559 static bool htc_try_send(struct htc_target *target,
560 struct htc_endpoint *endpoint,
561 struct htc_packet *tx_pkt)
562 {
563 struct htc_ep_callbacks ep_cb;
564 int txq_depth;
565 bool overflow = false;
566
567 ep_cb = endpoint->ep_cb;
568
569 spin_lock_bh(&target->tx_lock);
570 txq_depth = get_queue_depth(&endpoint->txq);
571 spin_unlock_bh(&target->tx_lock);
572
573 if (txq_depth >= endpoint->max_txq_depth)
574 overflow = true;
575
576 if (overflow)
577 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
578 "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
579 endpoint->eid, overflow, txq_depth,
580 endpoint->max_txq_depth);
581
582 if (overflow && ep_cb.tx_full) {
583 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
584 "indicating overflowed tx packet: 0x%p\n", tx_pkt);
585
586 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
587 HTC_SEND_FULL_DROP) {
588 endpoint->ep_st.tx_dropped += 1;
589 return false;
590 }
591 }
592
593 spin_lock_bh(&target->tx_lock);
594 list_add_tail(&tx_pkt->list, &endpoint->txq);
595 spin_unlock_bh(&target->tx_lock);
596
597 htc_tx_from_ep_txq(target, endpoint);
598
599 return true;
600 }
601
602 static void htc_chk_ep_txq(struct htc_target *target)
603 {
604 struct htc_endpoint *endpoint;
605 struct htc_endpoint_credit_dist *cred_dist;
606
607 /*
608 * Run through the credit distribution list to see if there are
609 * packets queued. NOTE: no locks need to be taken since the
610 * distribution list is not dynamic (cannot be re-ordered) and we
611 * are not modifying any state.
612 */
613 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
614 endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
615
616 spin_lock_bh(&target->tx_lock);
617 if (!list_empty(&endpoint->txq)) {
618 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
619 "ep %d has %d credits and %d packets in tx queue\n",
620 cred_dist->endpoint,
621 endpoint->cred_dist.credits,
622 get_queue_depth(&endpoint->txq));
623 spin_unlock_bh(&target->tx_lock);
624 /*
625 * Try to start the stalled queue, this list is
626 * ordered by priority. If there are credits
627 * available the highest priority queue will get a
628 * chance to reclaim credits from lower priority
629 * ones.
630 */
631 htc_tx_from_ep_txq(target, endpoint);
632 spin_lock_bh(&target->tx_lock);
633 }
634 spin_unlock_bh(&target->tx_lock);
635 }
636 }
637
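/*
 * Send the HTC "setup complete" control message on endpoint 0. Targets
 * speaking HTC v2.1 or later get the extended form, which also
 * advertises the host's RX bundling capability.
 */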
638 static int htc_setup_tx_complete(struct htc_target *target)
639 {
640 struct htc_packet *send_pkt = NULL;
641 int status;
642
643 send_pkt = htc_get_control_buf(target, true);
644
645 if (!send_pkt)
646 return -ENOMEM;
647
648 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
649 struct htc_setup_comp_ext_msg *setup_comp_ext;
650 u32 flags = 0;
651
652 setup_comp_ext =
653 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
654 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
655 setup_comp_ext->msg_id =
656 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
657
658 if (target->msg_per_bndl_max > 0) {
659 /* Indicate HTC bundling to the target */
660 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
661 setup_comp_ext->msg_per_rxbndl =
662 target->msg_per_bndl_max;
663 }
664
665 memcpy(&setup_comp_ext->flags, &flags,
666 sizeof(setup_comp_ext->flags));
667 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
668 sizeof(struct htc_setup_comp_ext_msg),
669 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
670
671 } else {
672 struct htc_setup_comp_msg *setup_comp;
673 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
674 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
675 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
676 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
677 sizeof(struct htc_setup_comp_msg),
678 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
679 }
680
681 /* we want synchronous operation */
682 send_pkt->completion = NULL;
683 htc_prep_send_pkt(send_pkt, 0, 0, 0);
684 status = htc_issue_send(target, send_pkt);
685
686 if (send_pkt != NULL)
687 htc_reclaim_txctrl_buf(target, send_pkt);
688
689 return status;
690 }
691
692 void ath6kl_htc_set_credit_dist(struct htc_target *target,
693 struct htc_credit_state_info *cred_dist_cntxt,
694 u16 srvc_pri_order[], int list_len)
695 {
696 struct htc_endpoint *endpoint;
697 int i, ep;
698
699 target->cred_dist_cntxt = cred_dist_cntxt;
700
701 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
702 &target->cred_dist_list);
703
704 for (i = 0; i < list_len; i++) {
705 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
706 endpoint = &target->endpoint[ep];
707 if (endpoint->svc_id == srvc_pri_order[i]) {
708 list_add_tail(&endpoint->cred_dist.list,
709 &target->cred_dist_list);
710 break;
711 }
712 }
713 if (ep >= ENDPOINT_MAX) {
714 WARN_ON(1);
715 return;
716 }
717 }
718 }
719
720 int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
721 {
722 struct htc_endpoint *endpoint;
723 struct list_head queue;
724
725 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
726 "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
727 packet->endpoint, packet->buf, packet->act_len);
728
729 if (packet->endpoint >= ENDPOINT_MAX) {
730 WARN_ON(1);
731 return -EINVAL;
732 }
733
734 endpoint = &target->endpoint[packet->endpoint];
735
736 if (!htc_try_send(target, endpoint, packet)) {
737 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
738 -ECANCELED : -ENOSPC;
739 INIT_LIST_HEAD(&queue);
740 list_add(&packet->list, &queue);
741 htc_tx_complete(endpoint, &queue);
742 }
743
744 return 0;
745 }
746
747 /* flush endpoint TX queue */
748 void ath6kl_htc_flush_txep(struct htc_target *target,
749 enum htc_endpoint_id eid, u16 tag)
750 {
751 struct htc_packet *packet, *tmp_pkt;
752 struct list_head discard_q, container;
753 struct htc_endpoint *endpoint = &target->endpoint[eid];
754
755 if (!endpoint->svc_id) {
756 WARN_ON(1);
757 return;
758 }
759
760 /* initialize the discard queue */
761 INIT_LIST_HEAD(&discard_q);
762
763 spin_lock_bh(&target->tx_lock);
764
765 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
766 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
767 (tag == packet->info.tx.tag))
768 list_move_tail(&packet->list, &discard_q);
769 }
770
771 spin_unlock_bh(&target->tx_lock);
772
773 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
774 packet->status = -ECANCELED;
775 list_del(&packet->list);
776 ath6kl_dbg(ATH6KL_DBG_TRC,
777 "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
778 packet, packet->act_len,
779 packet->endpoint, packet->info.tx.tag);
780
781 INIT_LIST_HEAD(&container);
782 list_add_tail(&packet->list, &container);
783 htc_tx_complete(endpoint, &container);
784 }
785
786 }
787
788 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
789 {
790 struct htc_endpoint *endpoint;
791 int i;
792
793 dump_cred_dist_stats(target);
794
795 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
796 endpoint = &target->endpoint[i];
797 if (endpoint->svc_id == 0)
798 /* not in use.. */
799 continue;
800 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
801 }
802 }
803
804 void ath6kl_htc_indicate_activity_change(struct htc_target *target,
805 enum htc_endpoint_id eid, bool active)
806 {
807 struct htc_endpoint *endpoint = &target->endpoint[eid];
808 bool dist = false;
809
810 if (endpoint->svc_id == 0) {
811 WARN_ON(1);
812 return;
813 }
814
815 spin_lock_bh(&target->tx_lock);
816
817 if (active) {
818 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
819 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
820 dist = true;
821 }
822 } else {
823 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
824 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
825 dist = true;
826 }
827 }
828
829 if (dist) {
830 endpoint->cred_dist.txq_depth =
831 get_queue_depth(&endpoint->txq);
832
833 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
834 target->cred_dist_cntxt, &target->cred_dist_list);
835
836 ath6k_credit_distribute(target->cred_dist_cntxt,
837 &target->cred_dist_list,
838 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
839 }
840
841 spin_unlock_bh(&target->tx_lock);
842
843 if (dist && !active)
844 htc_chk_ep_txq(target);
845 }
846
847 /* HTC Rx */
848
849 static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
850 int n_look_ahds)
851 {
852 endpoint->ep_st.rx_pkts++;
853 if (n_look_ahds == 1)
854 endpoint->ep_st.rx_lkahds++;
855 else if (n_look_ahds > 1)
856 endpoint->ep_st.rx_bundle_lkahd++;
857 }
858
859 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
860 enum htc_endpoint_id eid, int len)
861 {
862 return (eid == target->dev->ar->ctrl_ep) ?
863 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
864 }
865
866 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
867 {
868 struct list_head queue;
869
870 INIT_LIST_HEAD(&queue);
871 list_add_tail(&packet->list, &queue);
872 return ath6kl_htc_add_rxbuf_multiple(target, &queue);
873 }
874
875 static void htc_reclaim_rxbuf(struct htc_target *target,
876 struct htc_packet *packet,
877 struct htc_endpoint *ep)
878 {
879 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
880 htc_rxpkt_reset(packet);
881 packet->status = -ECANCELED;
882 ep->ep_cb.rx(ep->target, packet);
883 } else {
884 htc_rxpkt_reset(packet);
885 htc_add_rxbuf((void *)(target), packet);
886 }
887 }
888
889 static void reclaim_rx_ctrl_buf(struct htc_target *target,
890 struct htc_packet *packet)
891 {
892 spin_lock_bh(&target->htc_lock);
893 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
894 spin_unlock_bh(&target->htc_lock);
895 }
896
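/*
 * Synchronously read one padded HTC frame from the mailbox into the
 * packet buffer; fails with -ENOMEM if the padded length does not fit
 * in the buffer.
 */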
897 static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
898 u32 rx_len)
899 {
900 struct ath6kl_device *dev = target->dev;
901 u32 padded_len;
902 int status;
903
904 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
905
906 if (padded_len > packet->buf_len) {
907 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
908 padded_len, rx_len, packet->buf_len);
909 return -ENOMEM;
910 }
911
912 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
913 "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
914 packet, packet->info.rx.exp_hdr,
915 padded_len, dev->ar->mbox_info.htc_addr, "sync");
916
917 status = hif_read_write_sync(dev->ar,
918 dev->ar->mbox_info.htc_addr,
919 packet->buf, padded_len,
920 HIF_RD_SYNC_BLOCK_FIX);
921
922 packet->status = status;
923
924 return status;
925 }
926
927 /*
928 * Optimization for recv packets: we can indicate a
929 * "hint" that there are more single-packets to fetch
930 * on this endpoint.
931 */
932 static void set_rxpkt_indication_flag(u32 lk_ahd,
933 struct htc_endpoint *endpoint,
934 struct htc_packet *packet)
935 {
936 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
937
938 if (htc_hdr->eid == packet->endpoint) {
939 if (!list_empty(&endpoint->rx_bufq))
940 packet->info.rx.indicat_flags |=
941 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
942 }
943 }
944
945 static void chk_rx_water_mark(struct htc_endpoint *endpoint)
946 {
947 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
948
949 if (ep_cb.rx_refill_thresh > 0) {
950 spin_lock_bh(&endpoint->target->rx_lock);
951 if (get_queue_depth(&endpoint->rx_bufq)
952 < ep_cb.rx_refill_thresh) {
953 spin_unlock_bh(&endpoint->target->rx_lock);
954 ep_cb.rx_refill(endpoint->target, endpoint->eid);
955 return;
956 }
957 spin_unlock_bh(&endpoint->target->rx_lock);
958 }
959 }
960
961 /* This function is called with rx_lock held */
962 static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
963 u32 *lk_ahds, struct list_head *queue, int n_msg)
964 {
965 struct htc_packet *packet;
966 /* FIXME: type of lk_ahds can't be right */
967 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
968 struct htc_ep_callbacks ep_cb;
969 int status = 0, j, full_len;
970 bool no_recycle;
971
972 full_len = CALC_TXRX_PADDED_LEN(target,
973 le16_to_cpu(htc_hdr->payld_len) +
974 sizeof(*htc_hdr));
975
976 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
977 ath6kl_warn("Rx buffer requested with invalid length\n");
978 return -EINVAL;
979 }
980
981 ep_cb = ep->ep_cb;
982 for (j = 0; j < n_msg; j++) {
983
984 /*
985 * Reset the flag; any packets allocated using the
986 * rx_alloc() API cannot be recycled on
987 * cleanup, they must be explicitly returned.
988 */
989 no_recycle = false;
990
991 if (ep_cb.rx_allocthresh &&
992 (full_len > ep_cb.rx_alloc_thresh)) {
993 ep->ep_st.rx_alloc_thresh_hit += 1;
994 ep->ep_st.rxalloc_thresh_byte +=
995 le16_to_cpu(htc_hdr->payld_len);
996
997 spin_unlock_bh(&target->rx_lock);
998 no_recycle = true;
999
1000 packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
1001 full_len);
1002 spin_lock_bh(&target->rx_lock);
1003 } else {
1004 /* refill handler is being used */
1005 if (list_empty(&ep->rx_bufq)) {
1006 if (ep_cb.rx_refill) {
1007 spin_unlock_bh(&target->rx_lock);
1008 ep_cb.rx_refill(ep->target, ep->eid);
1009 spin_lock_bh(&target->rx_lock);
1010 }
1011 }
1012
1013 if (list_empty(&ep->rx_bufq))
1014 packet = NULL;
1015 else {
1016 packet = list_first_entry(&ep->rx_bufq,
1017 struct htc_packet, list);
1018 list_del(&packet->list);
1019 }
1020 }
1021
1022 if (!packet) {
1023 target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
1024 target->ep_waiting = ep->eid;
1025 return -ENOSPC;
1026 }
1027
1028 /* clear flags */
1029 packet->info.rx.rx_flags = 0;
1030 packet->info.rx.indicat_flags = 0;
1031 packet->status = 0;
1032
1033 if (no_recycle)
1034 /*
1035 * flag that these packets cannot be
1036 * recycled, they have to be returned to
1037 * the user
1038 */
1039 packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
1040
1041 /* Caller needs to free this upon any failure */
1042 list_add_tail(&packet->list, queue);
1043
1044 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1045 status = -ECANCELED;
1046 break;
1047 }
1048
1049 if (j) {
1050 packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
1051 packet->info.rx.exp_hdr = 0xFFFFFFFF;
1052 } else
1053 /* set expected look ahead */
1054 packet->info.rx.exp_hdr = *lk_ahds;
1055
1056 packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
1057 HTC_HDR_LENGTH;
1058 }
1059
1060 return status;
1061 }
1062
1063 static int alloc_and_prep_rxpkts(struct htc_target *target,
1064 u32 lk_ahds[], int msg,
1065 struct htc_endpoint *endpoint,
1066 struct list_head *queue)
1067 {
1068 int status = 0;
1069 struct htc_packet *packet, *tmp_pkt;
1070 struct htc_frame_hdr *htc_hdr;
1071 int i, n_msg;
1072
1073 spin_lock_bh(&target->rx_lock);
1074
1075 for (i = 0; i < msg; i++) {
1076
1077 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1078
1079 if (htc_hdr->eid >= ENDPOINT_MAX) {
1080 ath6kl_err("invalid ep in look-ahead: %d\n",
1081 htc_hdr->eid);
1082 status = -ENOMEM;
1083 break;
1084 }
1085
1086 if (htc_hdr->eid != endpoint->eid) {
1087 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1088 htc_hdr->eid, endpoint->eid, i);
1089 status = -ENOMEM;
1090 break;
1091 }
1092
1093 if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1094 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1095 htc_hdr->payld_len,
1096 (u32) HTC_MAX_PAYLOAD_LENGTH);
1097 status = -ENOMEM;
1098 break;
1099 }
1100
1101 if (endpoint->svc_id == 0) {
1102 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1103 status = -ENOMEM;
1104 break;
1105 }
1106
1107 if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1108 /*
1109 * HTC header indicates that every packet to follow
1110 * has the same padded length so that it can be
1111 * optimally fetched as a full bundle.
1112 */
1113 n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1114 HTC_FLG_RX_BNDL_CNT_S;
1115
1116 /* the count doesn't include the starter frame */
1117 n_msg++;
1118 if (n_msg > target->msg_per_bndl_max) {
1119 status = -ENOMEM;
1120 break;
1121 }
1122
1123 endpoint->ep_st.rx_bundle_from_hdr += 1;
1124 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1125 "htc hdr indicates :%d msg can be fetched as a bundle\n",
1126 n_msg);
1127 } else
1128 /* HTC header only indicates 1 message to fetch */
1129 n_msg = 1;
1130
1131 /* Setup packet buffers for each message */
1132 status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
1133 n_msg);
1134
1135 /*
1136 * This is due to the unavailability of buffers to receive the entire data.
1137 * Return no error so that free buffers from queue can be used
1138 * to receive partial data.
1139 */
1140 if (status == -ENOSPC) {
1141 spin_unlock_bh(&target->rx_lock);
1142 return 0;
1143 }
1144
1145 if (status)
1146 break;
1147 }
1148
1149 spin_unlock_bh(&target->rx_lock);
1150
1151 if (status) {
1152 list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1153 list_del(&packet->list);
1154 htc_reclaim_rxbuf(target, packet,
1155 &target->endpoint[packet->endpoint]);
1156 }
1157 }
1158
1159 return status;
1160 }
1161
1162 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1163 {
1164 if (packets->endpoint != ENDPOINT_0) {
1165 WARN_ON(1);
1166 return;
1167 }
1168
1169 if (packets->status == -ECANCELED) {
1170 reclaim_rx_ctrl_buf(context, packets);
1171 return;
1172 }
1173
1174 if (packets->act_len > 0) {
1175 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1176 packets->act_len + HTC_HDR_LENGTH);
1177
1178 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1179 "Unexpected ENDPOINT 0 Message",
1180 packets->buf - HTC_HDR_LENGTH,
1181 packets->act_len + HTC_HDR_LENGTH);
1182 }
1183
1184 htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1185 }
1186
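/*
 * Process a credit report from the target: endpoint statistics are
 * updated, endpoint 0 gets its credits back directly, and credits for
 * all other endpoints are handed to the credit distribution function.
 */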
1187 static void htc_proc_cred_rpt(struct htc_target *target,
1188 struct htc_credit_report *rpt,
1189 int n_entries,
1190 enum htc_endpoint_id from_ep)
1191 {
1192 struct htc_endpoint *endpoint;
1193 int tot_credits = 0, i;
1194 bool dist = false;
1195
1196 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1197 "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
1198
1199 spin_lock_bh(&target->tx_lock);
1200
1201 for (i = 0; i < n_entries; i++, rpt++) {
1202 if (rpt->eid >= ENDPOINT_MAX) {
1203 WARN_ON(1);
1204 spin_unlock_bh(&target->tx_lock);
1205 return;
1206 }
1207
1208 endpoint = &target->endpoint[rpt->eid];
1209
1210 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
1211 rpt->eid, rpt->credits);
1212
1213 endpoint->ep_st.tx_cred_rpt += 1;
1214 endpoint->ep_st.cred_retnd += rpt->credits;
1215
1216 if (from_ep == rpt->eid) {
1217 /*
1218 * This credit report arrived on the same endpoint
1219 * indicating it arrived in an RX packet.
1220 */
1221 endpoint->ep_st.cred_from_rx += rpt->credits;
1222 endpoint->ep_st.cred_rpt_from_rx += 1;
1223 } else if (from_ep == ENDPOINT_0) {
1224 /* credit arrived on endpoint 0 as a NULL message */
1225 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1226 endpoint->ep_st.cred_rpt_ep0 += 1;
1227 } else {
1228 endpoint->ep_st.cred_from_other += rpt->credits;
1229 endpoint->ep_st.cred_rpt_from_other += 1;
1230 }
1231
1232 if (rpt->eid == ENDPOINT_0)
1233 /* always give endpoint 0 credits back */
1234 endpoint->cred_dist.credits += rpt->credits;
1235 else {
1236 endpoint->cred_dist.cred_to_dist += rpt->credits;
1237 dist = true;
1238 }
1239
1240 /*
1241 * Refresh tx depth for distribution function that will
1242 * recover these credits NOTE: this is only valid when
1243 * there are credits to recover!
1244 */
1245 endpoint->cred_dist.txq_depth =
1246 get_queue_depth(&endpoint->txq);
1247
1248 tot_credits += rpt->credits;
1249 }
1250
1251 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1252 "report indicated %d credits to distribute\n",
1253 tot_credits);
1254
1255 if (dist) {
1256 /*
1257 * This was a credit return based on a completed send
1258 * operation; note that this is done with the lock held.
1259 */
1260 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
1261 target->cred_dist_cntxt, &target->cred_dist_list);
1262
1263 ath6k_credit_distribute(target->cred_dist_cntxt,
1264 &target->cred_dist_list,
1265 HTC_CREDIT_DIST_SEND_COMPLETE);
1266 }
1267
1268 spin_unlock_bh(&target->tx_lock);
1269
1270 if (tot_credits)
1271 htc_chk_ep_txq(target);
1272 }
1273
1274 static int htc_parse_trailer(struct htc_target *target,
1275 struct htc_record_hdr *record,
1276 u8 *record_buf, u32 *next_lk_ahds,
1277 enum htc_endpoint_id endpoint,
1278 int *n_lk_ahds)
1279 {
1280 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1281 struct htc_lookahead_report *lk_ahd;
1282 int len;
1283
1284 switch (record->rec_id) {
1285 case HTC_RECORD_CREDITS:
1286 len = record->len / sizeof(struct htc_credit_report);
1287 if (!len) {
1288 WARN_ON(1);
1289 return -EINVAL;
1290 }
1291
1292 htc_proc_cred_rpt(target,
1293 (struct htc_credit_report *) record_buf,
1294 len, endpoint);
1295 break;
1296 case HTC_RECORD_LOOKAHEAD:
1297 len = record->len / sizeof(*lk_ahd);
1298 if (!len) {
1299 WARN_ON(1);
1300 return -EINVAL;
1301 }
1302
1303 lk_ahd = (struct htc_lookahead_report *) record_buf;
1304 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1305 && next_lk_ahds) {
1306
1307 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1308 "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
1309 lk_ahd->pre_valid, lk_ahd->post_valid);
1310
1311 /* look ahead bytes are valid, copy them over */
1312 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1313
1314 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
1315 next_lk_ahds, 4);
1316
1317 *n_lk_ahds = 1;
1318 }
1319 break;
1320 case HTC_RECORD_LOOKAHEAD_BUNDLE:
1321 len = record->len / sizeof(*bundle_lkahd_rpt);
1322 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1323 WARN_ON(1);
1324 return -EINVAL;
1325 }
1326
1327 if (next_lk_ahds) {
1328 int i;
1329
1330 bundle_lkahd_rpt =
1331 (struct htc_bundle_lkahd_rpt *) record_buf;
1332
1333 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
1334 record_buf, record->len);
1335
1336 for (i = 0; i < len; i++) {
1337 memcpy((u8 *)&next_lk_ahds[i],
1338 bundle_lkahd_rpt->lk_ahd, 4);
1339 bundle_lkahd_rpt++;
1340 }
1341
1342 *n_lk_ahds = i;
1343 }
1344 break;
1345 default:
1346 ath6kl_err("unhandled record: id:%d len:%d\n",
1347 record->rec_id, record->len);
1348 break;
1349 }
1350
1351 return 0;
1352
1353 }
1354
1355 static int htc_proc_trailer(struct htc_target *target,
1356 u8 *buf, int len, u32 *next_lk_ahds,
1357 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1358 {
1359 struct htc_record_hdr *record;
1360 int orig_len;
1361 int status;
1362 u8 *record_buf;
1363 u8 *orig_buf;
1364
1365 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
1366
1367 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);
1368
1369 orig_buf = buf;
1370 orig_len = len;
1371 status = 0;
1372
1373 while (len > 0) {
1374
1375 if (len < sizeof(struct htc_record_hdr)) {
1376 status = -ENOMEM;
1377 break;
1378 }
1379 /* these are byte aligned structs */
1380 record = (struct htc_record_hdr *) buf;
1381 len -= sizeof(struct htc_record_hdr);
1382 buf += sizeof(struct htc_record_hdr);
1383
1384 if (record->len > len) {
1385 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1386 record->len, record->rec_id, len);
1387 status = -ENOMEM;
1388 break;
1389 }
1390 record_buf = buf;
1391
1392 status = htc_parse_trailer(target, record, record_buf,
1393 next_lk_ahds, endpoint, n_lk_ahds);
1394
1395 if (status)
1396 break;
1397
1398 /* advance buffer past this record for next time around */
1399 buf += record->len;
1400 len -= record->len;
1401 }
1402
1403 if (status)
1404 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
1405 orig_buf, orig_len);
1406
1407 return status;
1408 }
1409
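/*
 * Validate the HTC header of a received packet against the expected
 * lookahead, process any trailer records, and strip the header so only
 * the application payload remains in the buffer.
 */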
1410 static int htc_proc_rxhdr(struct htc_target *target,
1411 struct htc_packet *packet,
1412 u32 *next_lkahds, int *n_lkahds)
1413 {
1414 int status = 0;
1415 u16 payload_len;
1416 u32 lk_ahd;
1417 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1418
1419 if (n_lkahds != NULL)
1420 *n_lkahds = 0;
1421
1422 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
1423 packet->act_len);
1424
1425 /*
1426 * NOTE: we cannot assume the alignment of buf, so we use the safe
1427 * macros to retrieve 16 bit fields.
1428 */
1429 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1430
1431 memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1432
1433 if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1434 /*
1435 * Refresh the expected header and the actual length as it
1436 * was unknown when this packet was grabbed as part of the
1437 * bundle.
1438 */
1439 packet->info.rx.exp_hdr = lk_ahd;
1440 packet->act_len = payload_len + HTC_HDR_LENGTH;
1441
1442 /* validate the actual header that was refreshed */
1443 if (packet->act_len > packet->buf_len) {
1444 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1445 payload_len, lk_ahd);
1446 /*
1447 * Limit this to max buffer just to print out some
1448 * of the buffer.
1449 */
1450 packet->act_len = min(packet->act_len, packet->buf_len);
1451 status = -ENOMEM;
1452 goto fail_rx;
1453 }
1454
1455 if (packet->endpoint != htc_hdr->eid) {
1456 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1457 htc_hdr->eid, packet->endpoint);
1458 status = -ENOMEM;
1459 goto fail_rx;
1460 }
1461 }
1462
1463 if (lk_ahd != packet->info.rx.exp_hdr) {
1464 ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1465 packet, packet->info.rx.rx_flags);
1466 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
1467 &packet->info.rx.exp_hdr, 4);
1468 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
1469 (u8 *)&lk_ahd, sizeof(lk_ahd));
1470 status = -ENOMEM;
1471 goto fail_rx;
1472 }
1473
1474 if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1475 if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1476 htc_hdr->ctrl[0] > payload_len) {
1477 ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1478 payload_len, htc_hdr->ctrl[0]);
1479 status = -ENOMEM;
1480 goto fail_rx;
1481 }
1482
1483 if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1484 next_lkahds = NULL;
1485 n_lkahds = NULL;
1486 }
1487
1488 status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1489 + payload_len - htc_hdr->ctrl[0],
1490 htc_hdr->ctrl[0], next_lkahds,
1491 n_lkahds, packet->endpoint);
1492
1493 if (status)
1494 goto fail_rx;
1495
1496 packet->act_len -= htc_hdr->ctrl[0];
1497 }
1498
1499 packet->buf += HTC_HDR_LENGTH;
1500 packet->act_len -= HTC_HDR_LENGTH;
1501
1502 fail_rx:
1503 if (status)
1504 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
1505 packet->buf,
1506 packet->act_len < 256 ? packet->act_len : 256);
1507 else {
1508 if (packet->act_len > 0)
1509 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1510 "HTC - Application Msg",
1511 packet->buf, packet->act_len);
1512 }
1513
1514 return status;
1515 }
1516
1517 static void do_rx_completion(struct htc_endpoint *endpoint,
1518 struct htc_packet *packet)
1519 {
1520 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1521 "htc calling ep %d recv callback on packet 0x%p\n",
1522 endpoint->eid, packet);
1523 endpoint->ep_cb.rx(endpoint->target, packet);
1524 }
1525
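/*
 * Fetch up to msg_per_bndl_max queued RX packets in one scatter
 * request. Packets that do not fit in the bundle stay on 'rxq';
 * fetched packets move to 'sync_compq' for header processing.
 */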
1526 static int htc_issue_rxpkt_bundle(struct htc_target *target,
1527 struct list_head *rxq,
1528 struct list_head *sync_compq,
1529 int *n_pkt_fetched, bool part_bundle)
1530 {
1531 struct hif_scatter_req *scat_req;
1532 struct htc_packet *packet;
1533 int rem_space = target->max_rx_bndl_sz;
1534 int n_scat_pkt, status = 0, i, len;
1535
1536 n_scat_pkt = get_queue_depth(rxq);
1537 n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1538
1539 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1540 /*
1541 * We were forced to split this bundle receive operation;
1542 * all packets in this partial bundle must have their
1543 * lookaheads ignored.
1544 */
1545 part_bundle = true;
1546
1547 /*
1548 * This would only happen if the target ignored our max
1549 * bundle limit.
1550 */
1551 ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
1552 get_queue_depth(rxq), n_scat_pkt);
1553 }
1554
1555 len = 0;
1556
1557 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1558 "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
1559 get_queue_depth(rxq), n_scat_pkt);
1560
1561 scat_req = hif_scatter_req_get(target->dev->ar);
1562
1563 if (scat_req == NULL)
1564 goto fail_rx_pkt;
1565
1566 for (i = 0; i < n_scat_pkt; i++) {
1567 int pad_len;
1568
1569 packet = list_first_entry(rxq, struct htc_packet, list);
1570 list_del(&packet->list);
1571
1572 pad_len = CALC_TXRX_PADDED_LEN(target,
1573 packet->act_len);
1574
1575 if ((rem_space - pad_len) < 0) {
1576 list_add(&packet->list, rxq);
1577 break;
1578 }
1579
1580 rem_space -= pad_len;
1581
1582 if (part_bundle || (i < (n_scat_pkt - 1)))
1583 /*
1584 * Packets 0..n-1 cannot be checked for look-aheads
1585 * since we are fetching a bundle; the last packet,
1586 * however, can have its lookahead used
1587 */
1588 packet->info.rx.rx_flags |=
1589 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1590
1591 /* NOTE: 1 HTC packet per scatter entry */
1592 scat_req->scat_list[i].buf = packet->buf;
1593 scat_req->scat_list[i].len = pad_len;
1594
1595 packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1596
1597 list_add_tail(&packet->list, sync_compq);
1598
1599 WARN_ON(!scat_req->scat_list[i].len);
1600 len += scat_req->scat_list[i].len;
1601 }
1602
1603 scat_req->len = len;
1604 scat_req->scat_entries = i;
1605
1606 status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
1607
1608 if (!status)
1609 *n_pkt_fetched = i;
1610
1611 /* free scatter request */
1612 hif_scatter_req_add(target->dev->ar, scat_req);
1613
1614 fail_rx_pkt:
1615
1616 return status;
1617 }
1618
1619 static int htc_proc_fetched_rxpkts(struct htc_target *target,
1620 struct list_head *comp_pktq, u32 lk_ahds[],
1621 int *n_lk_ahd)
1622 {
1623 struct htc_packet *packet, *tmp_pkt;
1624 struct htc_endpoint *ep;
1625 int status = 0;
1626
1627 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1628 list_del(&packet->list);
1629 ep = &target->endpoint[packet->endpoint];
1630
1631 /* process header for each of the recv packet */
1632 status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
1633 if (status)
1634 return status;
1635
1636 if (list_empty(comp_pktq)) {
1637 /*
1638 * The last packet's more-packets flag is set
1639 * based on the lookahead.
1640 */
1641 if (*n_lk_ahd > 0)
1642 set_rxpkt_indication_flag(lk_ahds[0],
1643 ep, packet);
1644 } else
1645 /*
1646 * Packets in a bundle automatically have
1647 * this flag set.
1648 */
1649 packet->info.rx.indicat_flags |=
1650 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1651
1652 htc_update_rx_stats(ep, *n_lk_ahd);
1653
1654 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
1655 ep->ep_st.rx_bundl += 1;
1656
1657 do_rx_completion(ep, packet);
1658 }
1659
1660 return status;
1661 }
1662
1663 static int htc_fetch_rxpkts(struct htc_target *target,
1664 struct list_head *rx_pktq,
1665 struct list_head *comp_pktq)
1666 {
1667 int fetched_pkts;
1668 bool part_bundle = false;
1669 int status = 0;
1670
1671 /* now go fetch the list of HTC packets */
1672 while (!list_empty(rx_pktq)) {
1673 fetched_pkts = 0;
1674
1675 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1676 /*
1677 * There are enough packets to attempt a
1678 * bundle transfer and recv bundling is
1679 * allowed.
1680 */
1681 status = htc_issue_rxpkt_bundle(target, rx_pktq,
1682 comp_pktq,
1683 &fetched_pkts,
1684 part_bundle);
1685 if (status)
1686 return status;
1687
1688 if (!list_empty(rx_pktq))
1689 part_bundle = true;
1690 }
1691
1692 if (!fetched_pkts) {
1693 struct htc_packet *packet;
1694
1695 packet = list_first_entry(rx_pktq, struct htc_packet,
1696 list);
1697
1698 list_del(&packet->list);
1699
1700 /* fully synchronous */
1701 packet->completion = NULL;
1702
1703 if (!list_empty(rx_pktq))
1704 /*
1705 * look_aheads in all packets
1706 * except the last one in the
1707 * bundle must be ignored
1708 */
1709 packet->info.rx.rx_flags |=
1710 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1711
1712 /* go fetch the packet */
1713 status = dev_rx_pkt(target, packet, packet->act_len);
1714 if (status)
1715 return status;
1716
1717 list_add_tail(&packet->list, comp_pktq);
1718 }
1719 }
1720
1721 return status;
1722 }
1723
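/*
 * Main RX processing loop, driven by pending-message lookaheads: RX
 * buffers are allocated for the indicated endpoint, the frames are
 * fetched (bundled when possible) and their headers processed; the loop
 * repeats as long as new lookaheads keep arriving in the trailers.
 */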
1724 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1725 u32 msg_look_ahead[], int *num_pkts)
1726 {
1727 struct htc_packet *packets, *tmp_pkt;
1728 struct htc_endpoint *endpoint;
1729 struct list_head rx_pktq, comp_pktq;
1730 int status = 0;
1731 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
1732 int num_look_ahead = 1;
1733 enum htc_endpoint_id id;
1734 int n_fetched = 0;
1735
1736 *num_pkts = 0;
1737
1738 /*
1739 * On first entry copy the look_aheads into our temp array for
1740 * processing
1741 */
1742 memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
1743
1744 while (true) {
1745
1746 /*
1747 * First lookahead sets the expected endpoint IDs for all
1748 * packets in a bundle.
1749 */
1750 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
1751 endpoint = &target->endpoint[id];
1752
1753 if (id >= ENDPOINT_MAX) {
1754 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1755 id);
1756 status = -ENOMEM;
1757 break;
1758 }
1759
1760 INIT_LIST_HEAD(&rx_pktq);
1761 INIT_LIST_HEAD(&comp_pktq);
1762
1763 /*
1764 * Try to allocate as many HTC RX packets as indicated by the
1765 * look_aheads.
1766 */
1767 status = alloc_and_prep_rxpkts(target, look_aheads,
1768 num_look_ahead, endpoint,
1769 &rx_pktq);
1770 if (status)
1771 break;
1772
1773 if (get_queue_depth(&rx_pktq) >= 2)
1774 /*
1775 * A recv bundle was detected, force IRQ status
1776 * re-check again
1777 */
1778 target->chk_irq_status_cnt = 1;
1779
1780 n_fetched += get_queue_depth(&rx_pktq);
1781
1782 num_look_ahead = 0;
1783
1784 status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
1785
1786 if (!status)
1787 chk_rx_water_mark(endpoint);
1788
1789 /* Process fetched packets */
1790 status = htc_proc_fetched_rxpkts(target, &comp_pktq,
1791 look_aheads, &num_look_ahead);
1792
1793 if (!num_look_ahead || status)
1794 break;
1795
1796 /*
1797 * For SYNCH processing, if we get here, we are running
1798 * through the loop again due to a detected lookahead. Set
1799 * flag that we should re-check IRQ status registers again
1800 * before leaving IRQ processing, this can net better
1801 * performance in high throughput situations.
1802 */
1803 target->chk_irq_status_cnt = 1;
1804 }
1805
1806 if (status) {
1807 ath6kl_err("failed to get pending recv messages: %d\n",
1808 status);
1809 /*
1810 * Clean up any packets we allocated but didn't use to
1811 * actually fetch any packets.
1812 */
1813 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1814 list_del(&packets->list);
1815 htc_reclaim_rxbuf(target, packets,
1816 &target->endpoint[packets->endpoint]);
1817 }
1818
1819 /* cleanup any packets in sync completion queue */
1820 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
1821 list_del(&packets->list);
1822 htc_reclaim_rxbuf(target, packets,
1823 &target->endpoint[packets->endpoint]);
1824 }
1825
1826 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1827 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1828 ath6kldev_rx_control(target->dev, false);
1829 }
1830 }
1831
1832 /*
1833 * Before leaving, check to see if host ran out of buffers and
1834 * needs to stop the receiver.
1835 */
1836 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1837 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1838 ath6kldev_rx_control(target->dev, false);
1839 }
1840 *num_pkts = n_fetched;
1841
1842 return status;
1843 }
1844
1845 /*
1846 * Synchronously wait for a control message from the target.
1847 * This function is used at initialization time ONLY. At init, messages
1848 * on ENDPOINT 0 are expected.
1849 */
1850 static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
1851 {
1852 struct htc_packet *packet = NULL;
1853 struct htc_frame_hdr *htc_hdr;
1854 u32 look_ahead;
1855
1856 if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
1857 HTC_TARGET_RESPONSE_TIMEOUT))
1858 return NULL;
1859
1860 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1861 "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
1862
1863 htc_hdr = (struct htc_frame_hdr *)&look_ahead;
1864
1865 if (htc_hdr->eid != ENDPOINT_0)
1866 return NULL;
1867
1868 packet = htc_get_control_buf(target, false);
1869
1870 if (!packet)
1871 return NULL;
1872
1873 packet->info.rx.rx_flags = 0;
1874 packet->info.rx.exp_hdr = look_ahead;
1875 packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
1876
1877 if (packet->act_len > packet->buf_len)
1878 goto fail_ctrl_rx;
1879
1880 /* we want synchronous operation */
1881 packet->completion = NULL;
1882
1883 /* get the message from the device, this will block */
1884 if (dev_rx_pkt(target, packet, packet->act_len))
1885 goto fail_ctrl_rx;
1886
1887 /* process receive header */
1888 packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);
1889
1890 if (packet->status) {
1891 ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
1892 packet->status);
1893 goto fail_ctrl_rx;
1894 }
1895
1896 return packet;
1897
1898 fail_ctrl_rx:
1899 if (packet != NULL) {
1900 htc_rxpkt_reset(packet);
1901 reclaim_rx_ctrl_buf(target, packet);
1902 }
1903
1904 return NULL;
1905 }
1906
1907 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
1908 struct list_head *pkt_queue)
1909 {
1910 struct htc_endpoint *endpoint;
1911 struct htc_packet *first_pkt;
1912 bool rx_unblock = false;
1913 int status = 0, depth;
1914
1915 if (list_empty(pkt_queue))
1916 return -ENOMEM;
1917
1918 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
1919
1920 if (first_pkt->endpoint >= ENDPOINT_MAX)
1921 return status;
1922
1923 depth = get_queue_depth(pkt_queue);
1924
1925 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1926 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
1927 first_pkt->endpoint, depth, first_pkt->buf_len);
1928
1929 endpoint = &target->endpoint[first_pkt->endpoint];
1930
1931 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1932 struct htc_packet *packet, *tmp_pkt;
1933
1934 /* walk through queue and mark each one canceled */
1935 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1936 packet->status = -ECANCELED;
1937 list_del(&packet->list);
1938 do_rx_completion(endpoint, packet);
1939 }
1940
1941 return status;
1942 }
1943
1944 spin_lock_bh(&target->rx_lock);
1945
1946 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
1947
1948 /* check if we are blocked waiting for a new buffer */
1949 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1950 if (target->ep_waiting == first_pkt->endpoint) {
1951 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1952 "receiver was blocked on ep:%d, unblocking.\n",
1953 target->ep_waiting);
1954 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
1955 target->ep_waiting = ENDPOINT_MAX;
1956 rx_unblock = true;
1957 }
1958 }
1959
1960 spin_unlock_bh(&target->rx_lock);
1961
1962 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
1963 /* TODO : implement a buffer threshold count? */
1964 ath6kldev_rx_control(target->dev, true);
1965
1966 return status;
1967 }
1968
1969 void ath6kl_htc_flush_rx_buf(struct htc_target *target)
1970 {
1971 struct htc_endpoint *endpoint;
1972 struct htc_packet *packet, *tmp_pkt;
1973 int i;
1974
1975 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1976 endpoint = &target->endpoint[i];
1977 if (!endpoint->svc_id)
1978 /* not in use.. */
1979 continue;
1980
1981 spin_lock_bh(&target->rx_lock);
1982 list_for_each_entry_safe(packet, tmp_pkt,
1983 &endpoint->rx_bufq, list) {
1984 list_del(&packet->list);
1985 spin_unlock_bh(&target->rx_lock);
1986 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1987 "flushing rx pkt:0x%p, len:%d, ep:%d\n",
1988 packet, packet->buf_len,
1989 packet->endpoint);
1990 dev_kfree_skb(packet->pkt_cntxt);
1991 spin_lock_bh(&target->rx_lock);
1992 }
1993 spin_unlock_bh(&target->rx_lock);
1994 }
1995 }
1996
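/*
 * Connect an HTC service. For the pseudo control service the endpoint
 * is assigned locally; otherwise a connect request is sent on endpoint
 * 0 and the response supplies the assigned endpoint and maximum message
 * size, from which the endpoint state and its credit distribution entry
 * are initialized.
 */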
1997 int ath6kl_htc_conn_service(struct htc_target *target,
1998 struct htc_service_connect_req *conn_req,
1999 struct htc_service_connect_resp *conn_resp)
2000 {
2001 struct htc_packet *rx_pkt = NULL;
2002 struct htc_packet *tx_pkt = NULL;
2003 struct htc_conn_service_resp *resp_msg;
2004 struct htc_conn_service_msg *conn_msg;
2005 struct htc_endpoint *endpoint;
2006 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2007 unsigned int max_msg_sz = 0;
2008 int status = 0;
2009
2010 ath6kl_dbg(ATH6KL_DBG_TRC,
2011 "htc_conn_service, target:0x%p service id:0x%X\n",
2012 target, conn_req->svc_id);
2013
2014 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2015 /* special case for pseudo control service */
2016 assigned_ep = ENDPOINT_0;
2017 max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2018 } else {
2019 /* allocate a packet to send to the target */
2020 tx_pkt = htc_get_control_buf(target, true);
2021
2022 if (!tx_pkt)
2023 return -ENOMEM;
2024
2025 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2026 memset(conn_msg, 0, sizeof(*conn_msg));
2027 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2028 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2029 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2030
2031 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2032 sizeof(*conn_msg) + conn_msg->svc_meta_len,
2033 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2034
2035 /* we want synchronous operation */
2036 tx_pkt->completion = NULL;
2037 htc_prep_send_pkt(tx_pkt, 0, 0, 0);
2038 status = htc_issue_send(target, tx_pkt);
2039
2040 if (status)
2041 goto fail_tx;
2042
2043 /* wait for response */
2044 rx_pkt = htc_wait_for_ctrl_msg(target);
2045
2046 if (!rx_pkt) {
2047 status = -ENOMEM;
2048 goto fail_tx;
2049 }
2050
2051 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2052
2053 if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
2054 || (rx_pkt->act_len < sizeof(*resp_msg))) {
2055 status = -ENOMEM;
2056 goto fail_tx;
2057 }
2058
2059 conn_resp->resp_code = resp_msg->status;
2060 /* check response status */
2061 if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2062 ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2063 resp_msg->svc_id, resp_msg->status);
2064 status = -ENOMEM;
2065 goto fail_tx;
2066 }
2067
2068 assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2069 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2070 }
2071
2072 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
2073 status = -ENOMEM;
2074 goto fail_tx;
2075 }
2076
2077 endpoint = &target->endpoint[assigned_ep];
2078 endpoint->eid = assigned_ep;
2079 if (endpoint->svc_id) {
2080 status = -ENOMEM;
2081 goto fail_tx;
2082 }
2083
2084 /* return assigned endpoint to caller */
2085 conn_resp->endpoint = assigned_ep;
2086 conn_resp->len_max = max_msg_sz;
2087
2088 /* setup the endpoint */
2089
2090 /* this marks the endpoint in use */
2091 endpoint->svc_id = conn_req->svc_id;
2092
2093 endpoint->max_txq_depth = conn_req->max_txq_depth;
2094 endpoint->len_max = max_msg_sz;
2095 endpoint->ep_cb = conn_req->ep_cb;
2096 endpoint->cred_dist.svc_id = conn_req->svc_id;
2097 endpoint->cred_dist.htc_rsvd = endpoint;
2098 endpoint->cred_dist.endpoint = assigned_ep;
2099 endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2100
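/* cred_per_msg is the number of target credits one maximum-sized message consumes, e.g. a 1536-byte max message with 256-byte credits gives cred_per_msg = 6 (illustrative values); it is rounded up to at least 1 below */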
2101 if (conn_req->max_rxmsg_sz) {
2102 /*
2103 * Override the cred_per_msg calculation; this optimizes
2104 * the credit-low indications since the host will actually
2105 * issue smaller messages in the Send path.
2106 */
2107 if (conn_req->max_rxmsg_sz > max_msg_sz) {
2108 status = -ENOMEM;
2109 goto fail_tx;
2110 }
2111 endpoint->cred_dist.cred_per_msg =
2112 conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2113 } else
2114 endpoint->cred_dist.cred_per_msg =
2115 max_msg_sz / target->tgt_cred_sz;
2116
2117 if (!endpoint->cred_dist.cred_per_msg)
2118 endpoint->cred_dist.cred_per_msg = 1;
2119
2120 /* save local connection flags */
2121 endpoint->conn_flags = conn_req->flags;
2122
2123 fail_tx:
2124 if (tx_pkt)
2125 htc_reclaim_txctrl_buf(target, tx_pkt);
2126
2127 if (rx_pkt) {
2128 htc_rxpkt_reset(rx_pkt);
2129 reclaim_rx_ctrl_buf(target, rx_pkt);
2130 }
2131
2132 return status;
2133 }
2134
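/*
 * Illustrative only: a minimal sketch of connecting a data service with
 * ath6kl_htc_conn_service() above. EXAMPLE_SVC_ID and example_rx_cb are
 * hypothetical; only the request/response fields used above are assumed.
 */
#if 0	/* example sketch, never compiled */
static int example_connect_service(struct htc_target *target)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));

	connect.svc_id = EXAMPLE_SVC_ID;	/* hypothetical service id */
	connect.max_txq_depth = 32;		/* illustrative queue depth */
	connect.ep_cb.rx = example_rx_cb;	/* hypothetical rx callback */
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;

	status = ath6kl_htc_conn_service(target, &connect, &resp);
	if (status)
		return status;

	/* resp.endpoint and resp.len_max describe the assigned endpoint */
	return 0;
}
#endif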
2135 static void reset_ep_state(struct htc_target *target)
2136 {
2137 struct htc_endpoint *endpoint;
2138 int i;
2139
2140 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2141 endpoint = &target->endpoint[i];
2142 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2143 endpoint->svc_id = 0;
2144 endpoint->len_max = 0;
2145 endpoint->max_txq_depth = 0;
2146 memset(&endpoint->ep_st, 0,
2147 sizeof(endpoint->ep_st));
2148 INIT_LIST_HEAD(&endpoint->rx_bufq);
2149 INIT_LIST_HEAD(&endpoint->txq);
2150 endpoint->target = target;
2151 }
2152
2153 /* reset distribution list */
2154 INIT_LIST_HEAD(&target->cred_dist_list);
2155 }
2156
2157 int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
2158 enum htc_endpoint_id endpoint)
2159 {
2160 int num;
2161
2162 spin_lock_bh(&target->rx_lock);
2163 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2164 spin_unlock_bh(&target->rx_lock);
2165 return num;
2166 }
2167
2168 static void htc_setup_msg_bndl(struct htc_target *target)
2169 {
2170 /* limit what HTC can handle */
2171 target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2172 target->msg_per_bndl_max);
2173
2174 if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2175 target->msg_per_bndl_max = 0;
2176 return;
2177 }
2178
2179 /* limit the bundle size to what the device layer can handle */
2180 target->msg_per_bndl_max = min(target->max_scat_entries,
2181 target->msg_per_bndl_max);
2182
2183 ath6kl_dbg(ATH6KL_DBG_TRC,
2184 "htc bundling allowed. max msg per htc bundle: %d\n",
2185 target->msg_per_bndl_max);
2186
2187 /* Max rx bundle size is limited by the max tx bundle size */
2188 target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2189 /* Max tx bundle size is limited by the extended mbox address range */
2190 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2191 target->max_xfer_szper_scatreq);
2192
2193 ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
2194 target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2195
2196 if (target->max_tx_bndl_sz)
2197 target->tx_bndl_enable = true;
2198
2199 if (target->max_rx_bndl_sz)
2200 target->rx_bndl_enable = true;
2201
2202 if ((target->tgt_cred_sz % target->block_sz) != 0) {
2203 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2204 target->tgt_cred_sz);
2205
2206 /*
2207 * Disallow send bundling since the credit size is
2208 * not aligned to a block size; the I/O block
2209 * padding will spill into the next credit buffer,
2210 * which is fatal.
2211 */
2212 target->tx_bndl_enable = false;
2213 }
2214 }
2215
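/*
 * Illustrative only: with a 64-byte I/O block size and a 96-byte target
 * credit (hypothetical values), a credit-sized send is padded up to 128
 * bytes, so 32 bytes spill into the next credit's buffer; this is why the
 * check above disables tx bundling when the credit size is not block
 * aligned.
 */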
2216 int ath6kl_htc_wait_target(struct htc_target *target)
2217 {
2218 struct htc_packet *packet = NULL;
2219 struct htc_ready_ext_msg *rdy_msg;
2220 struct htc_service_connect_req connect;
2221 struct htc_service_connect_resp resp;
2222 int status;
2223
2224 /* we should receive a single control message indicating the target is ready */
2225 packet = htc_wait_for_ctrl_msg(target);
2226
2227 if (!packet)
2228 return -ENOMEM;
2229
2230 /* we controlled the buffer creation so it's properly aligned */
2231 rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2232
2233 if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2234 (packet->act_len < sizeof(struct htc_ready_msg))) {
2235 status = -ENOMEM;
2236 goto fail_wait_target;
2237 }
2238
2239 if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2240 status = -ENOMEM;
2241 goto fail_wait_target;
2242 }
2243
2244 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2245 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2246
2247 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
2248 "target ready: credits: %d credit size: %d\n",
2249 target->tgt_creds, target->tgt_cred_sz);
2250
2251 /* check if this is an extended ready message */
2252 if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2253 /* this is an extended message */
2254 target->htc_tgt_ver = rdy_msg->htc_ver;
2255 target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2256 } else {
2257 /* legacy */
2258 target->htc_tgt_ver = HTC_VERSION_2P0;
2259 target->msg_per_bndl_max = 0;
2260 }
2261
2262 ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
2263 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2264 target->htc_tgt_ver);
2265
2266 if (target->msg_per_bndl_max > 0)
2267 htc_setup_msg_bndl(target);
2268
2269 /* setup our pseudo HTC control endpoint connection */
2270 memset(&connect, 0, sizeof(connect));
2271 memset(&resp, 0, sizeof(resp));
2272 connect.ep_cb.rx = htc_ctrl_rx;
2273 connect.ep_cb.rx_refill = NULL;
2274 connect.ep_cb.tx_full = NULL;
2275 connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2276 connect.svc_id = HTC_CTRL_RSVD_SVC;
2277
2278 /* connect fake service */
2279 status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
2280
2281 if (status)
2282 ath6kl_hif_cleanup_scatter(target->dev->ar);
2283
2284 fail_wait_target:
2285 if (packet) {
2286 htc_rxpkt_reset(packet);
2287 reclaim_rx_ctrl_buf(target, packet);
2288 }
2289
2290 return status;
2291 }
2292
2293 /*
2294 * Start HTC, enable interrupts and let the target know
2295 * the host has finished setup.
2296 */
2297 int ath6kl_htc_start(struct htc_target *target)
2298 {
2299 struct htc_packet *packet;
2300 int status;
2301
2302 /* Disable interrupts at the chip level */
2303 ath6kldev_disable_intrs(target->dev);
2304
2305 target->htc_flags = 0;
2306 target->rx_st_flags = 0;
2307
2308 /* Push control receive buffers into htc control endpoint */
2309 while ((packet = htc_get_control_buf(target, false)) != NULL) {
2310 status = htc_add_rxbuf(target, packet);
2311 if (status)
2312 return status;
2313 }
2314
2315 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2316 ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
2317 target->tgt_creds);
2318
2319 dump_cred_dist_stats(target);
2320
2321 /* Indicate to the target that setup is complete */
2322 status = htc_setup_tx_complete(target);
2323
2324 if (status)
2325 return status;
2326
2327 /* unmask interrupts */
2328 status = ath6kldev_unmask_intrs(target->dev);
2329
2330 if (status)
2331 ath6kl_htc_stop(target);
2332
2333 return status;
2334 }
2335
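/*
 * Illustrative only: the bring-up order implied by the routines in this
 * file. The caller, error handling and comments are a sketch, not code
 * taken from the driver core.
 */
#if 0	/* example sketch, never compiled */
static int example_htc_bringup(struct ath6kl *ar)
{
	struct htc_target *target;
	int status;

	/* allocate the target and its control buffers */
	target = ath6kl_htc_create(ar);
	if (!target)
		return -ENOMEM;

	/* wait for the target's HTC ready message */
	status = ath6kl_htc_wait_target(target);
	if (status)
		goto err_cleanup;

	/* connect data services here with ath6kl_htc_conn_service() */

	/* push rx buffers and unmask interrupts */
	status = ath6kl_htc_start(target);
	if (status)
		goto err_cleanup;

	return 0;

err_cleanup:
	ath6kl_htc_cleanup(target);
	return status;
}
#endif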
2336 /* htc_stop: stop interrupt reception, and flush all queued buffers */
2337 void ath6kl_htc_stop(struct htc_target *target)
2338 {
2339 spin_lock_bh(&target->htc_lock);
2340 target->htc_flags |= HTC_OP_STATE_STOPPING;
2341 spin_unlock_bh(&target->htc_lock);
2342
2343 /*
2344 * Masking interrupts is a synchronous operation; when this
2345 * function returns, all pending HIF I/O has completed and we can
2346 * safely flush the queues.
2347 */
2348 ath6kldev_mask_intrs(target->dev);
2349
2350 ath6kl_htc_flush_txep_all(target);
2351
2352 ath6kl_htc_flush_rx_buf(target);
2353
2354 reset_ep_state(target);
2355 }
2356
2357 void *ath6kl_htc_create(struct ath6kl *ar)
2358 {
2359 struct htc_target *target = NULL;
2360 struct htc_packet *packet;
2361 int status = 0, i = 0;
2362 u32 block_size, ctrl_bufsz;
2363
2364 target = kzalloc(sizeof(*target), GFP_KERNEL);
2365 if (!target) {
2366 ath6kl_err("unable to allocate memory\n");
2367 return NULL;
2368 }
2369
2370 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2371 if (!target->dev) {
2372 ath6kl_err("unable to allocate memory\n");
2373 status = -ENOMEM;
2374 goto fail_create_htc;
2375 }
2376
2377 spin_lock_init(&target->htc_lock);
2378 spin_lock_init(&target->rx_lock);
2379 spin_lock_init(&target->tx_lock);
2380
2381 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2382 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2383 INIT_LIST_HEAD(&target->cred_dist_list);
2384
2385 target->dev->ar = ar;
2386 target->dev->htc_cnxt = target;
2387 target->ep_waiting = ENDPOINT_MAX;
2388
2389 reset_ep_state(target);
2390
2391 status = ath6kldev_setup(target->dev);
2392
2393 if (status)
2394 goto fail_create_htc;
2395
2396 block_size = ar->mbox_info.block_size;
2397
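/*
 * Each control buffer must be able to hold the larger of one I/O block or
 * the biggest control message, plus the HTC header that is prepended at
 * send time.
 */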
2398 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2399 (block_size + HTC_HDR_LENGTH) :
2400 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2401
2402 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2403 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2404 if (!packet)
2405 break;
2406
2407 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2408 if (!packet->buf_start) {
2409 kfree(packet);
2410 break;
2411 }
2412
2413 packet->buf_len = ctrl_bufsz;
2414 if (i < NUM_CONTROL_RX_BUFFERS) {
2415 packet->act_len = 0;
2416 packet->buf = packet->buf_start;
2417 packet->endpoint = ENDPOINT_0;
2418 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2419 } else
2420 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2421 }
2422
2423 fail_create_htc:
2424 if (i != NUM_CONTROL_BUFFERS || status) {
2425 if (target) {
2426 ath6kl_htc_cleanup(target);
2427 target = NULL;
2428 }
2429 }
2430
2431 return target;
2432 }
2433
2434 /* cleanup the HTC instance */
2435 void ath6kl_htc_cleanup(struct htc_target *target)
2436 {
2437 struct htc_packet *packet, *tmp_packet;
2438
2439 ath6kl_hif_cleanup_scatter(target->dev->ar);
2440
2441 list_for_each_entry_safe(packet, tmp_packet,
2442 &target->free_ctrl_txbuf, list) {
2443 list_del(&packet->list);
2444 kfree(packet->buf_start);
2445 kfree(packet);
2446 }
2447
2448 list_for_each_entry_safe(packet, tmp_packet,
2449 &target->free_ctrl_rxbuf, list) {
2450 list_del(&packet->list);
2451 kfree(packet->buf_start);
2452 kfree(packet);
2453 }
2454
2455 kfree(target->dev);
2456 kfree(target);
2457 }