drivers/net/wireless/ath/ath6kl/htc.c
1 /*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "core.h"
18 #include "hif.h"
19 #include "debug.h"
20 #include "hif-ops.h"
21 #include <asm/unaligned.h>
22
23 #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
24
25 /* Functions for Tx credit handling */
26 static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
27 struct htc_endpoint_credit_dist *ep_dist,
28 int credits)
29 {
30 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
31 ep_dist->endpoint, credits);
32
33 ep_dist->credits += credits;
34 ep_dist->cred_assngd += credits;
35 cred_info->cur_free_credits -= credits;
36 }
37
38 static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
39 struct list_head *ep_list,
40 int tot_credits)
41 {
42 struct htc_endpoint_credit_dist *cur_ep_dist;
43 int count;
44
45 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
46
47 cred_info->cur_free_credits = tot_credits;
48 cred_info->total_avail_credits = tot_credits;
49
50 list_for_each_entry(cur_ep_dist, ep_list, list) {
51 if (cur_ep_dist->endpoint == ENDPOINT_0)
52 continue;
53
54 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
55
56 if (tot_credits > 4) {
57 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
58 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
59 ath6kl_credit_deposit(cred_info,
60 cur_ep_dist,
61 cur_ep_dist->cred_min);
62 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
63 }
64 }
65
66 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
67 ath6kl_credit_deposit(cred_info, cur_ep_dist,
68 cur_ep_dist->cred_min);
69 /*
70 * Control service is always marked active, it
71 * never goes inactive EVER.
72 */
73 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
74 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
75 /* this is the lowest priority data endpoint */
76 /* FIXME: this looks fishy, check */
77 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
78
79 /*
80 * Streams have to be created (explicit | implicit) for all
81 * kinds of traffic. BE endpoints are also inactive in the
82 * beginning. When BE traffic starts, it creates implicit
83 * streams that redistribute credits.
84 *
85 * Note: all other endpoints have minimums set but are
86 * initially given NO credits. Credits will be distributed
87 * as traffic activity demands.
88 */
89 }
90
91 WARN_ON(cred_info->cur_free_credits <= 0);
92
93 list_for_each_entry(cur_ep_dist, ep_list, list) {
94 if (cur_ep_dist->endpoint == ENDPOINT_0)
95 continue;
96
97 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
98 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
99 else {
100 /*
101 * For the remaining data endpoints, we assume that
102 * cred_per_msg is the same for each. We use a simple
103 * calculation here, we take the remaining credits
104 * and determine how many max messages this can
105 * cover and then set each endpoint's normal value
106 * equal to 3/4 this amount.
107 */
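/*
 * Illustrative numbers (not from the source): with 24 free credits
 * and cred_per_msg = 6, count becomes (24 / 6) * 6 = 24, then
 * (24 * 3) >> 2 = 18, so cred_norm is set to 18 (and never drops
 * below cred_per_msg because of the max() below).
 */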
108 count = (cred_info->cur_free_credits /
109 cur_ep_dist->cred_per_msg)
110 * cur_ep_dist->cred_per_msg;
111 count = (count * 3) >> 2;
112 count = max(count, cur_ep_dist->cred_per_msg);
113 cur_ep_dist->cred_norm = count;
114
115 }
116
117 ath6kl_dbg(ATH6KL_DBG_CREDIT,
118 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
119 cur_ep_dist->endpoint,
120 cur_ep_dist->svc_id,
121 cur_ep_dist->credits,
122 cur_ep_dist->cred_per_msg,
123 cur_ep_dist->cred_norm,
124 cur_ep_dist->cred_min);
125 }
126 }
127
128 /* initialize and setup credit distribution */
129 int ath6kl_credit_setup(void *htc_handle,
130 struct ath6kl_htc_credit_info *cred_info)
131 {
132 u16 servicepriority[5];
133
134 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
135
136 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
137 servicepriority[1] = WMI_DATA_VO_SVC;
138 servicepriority[2] = WMI_DATA_VI_SVC;
139 servicepriority[3] = WMI_DATA_BE_SVC;
140 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
141
142 /* set priority list */
143 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
144
145 return 0;
146 }
147
148 /* reduce an ep's credits back to a set limit */
149 static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
150 struct htc_endpoint_credit_dist *ep_dist,
151 int limit)
152 {
153 int credits;
154
155 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
156 ep_dist->endpoint, limit);
157
158 ep_dist->cred_assngd = limit;
159
160 if (ep_dist->credits <= limit)
161 return;
162
163 credits = ep_dist->credits - limit;
164 ep_dist->credits -= credits;
165 cred_info->cur_free_credits += credits;
166 }
167
168 static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
169 struct list_head *epdist_list)
170 {
171 struct htc_endpoint_credit_dist *cur_dist_list;
172
173 list_for_each_entry(cur_dist_list, epdist_list, list) {
174 if (cur_dist_list->endpoint == ENDPOINT_0)
175 continue;
176
177 if (cur_dist_list->cred_to_dist > 0) {
178 cur_dist_list->credits +=
179 cur_dist_list->cred_to_dist;
180 cur_dist_list->cred_to_dist = 0;
181 if (cur_dist_list->credits >
182 cur_dist_list->cred_assngd)
183 ath6kl_credit_reduce(cred_info,
184 cur_dist_list,
185 cur_dist_list->cred_assngd);
186
187 if (cur_dist_list->credits >
188 cur_dist_list->cred_norm)
189 ath6kl_credit_reduce(cred_info, cur_dist_list,
190 cur_dist_list->cred_norm);
191
192 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
193 if (cur_dist_list->txq_depth == 0)
194 ath6kl_credit_reduce(cred_info,
195 cur_dist_list, 0);
196 }
197 }
198 }
199 }
200
201 /*
202 * HTC has an endpoint that needs credits; ep_dist is the endpoint in
203 * question.
204 */
205 static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
206 struct htc_endpoint_credit_dist *ep_dist)
207 {
208 struct htc_endpoint_credit_dist *curdist_list;
209 int credits = 0;
210 int need;
211
212 if (ep_dist->svc_id == WMI_CONTROL_SVC)
213 goto out;
214
215 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
216 (ep_dist->svc_id == WMI_DATA_VO_SVC))
217 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
218 goto out;
219
220 /*
221 * For all other services, we follow a simple algorithm of:
222 *
223 * 1. checking the free pool for credits
224 * 2. checking lower priority endpoints for credits to take
225 */
226
227 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
228
229 if (credits >= ep_dist->seek_cred)
230 goto out;
231
232 /*
233 * We don't have enough in the free pool, so try taking away from
234 * lower priority services. The rules for taking away credits:
235 *
236 * 1. Only take from lower priority endpoints
237 * 2. Only take what is allocated above the minimum (never
238 * starve an endpoint completely)
239 * 3. Only take what you need.
240 */
241
242 list_for_each_entry_reverse(curdist_list,
243 &cred_info->lowestpri_ep_dist,
244 list) {
245 if (curdist_list == ep_dist)
246 break;
247
248 need = ep_dist->seek_cred - cred_info->cur_free_credits;
249
250 if ((curdist_list->cred_assngd - need) >=
251 curdist_list->cred_min) {
252 /*
253 * The current endpoint has been allocated more than
254 * its minimum and has enough credits assigned
255 * above its minimum to fulfill our need, so try to
256 * take away just enough to fulfill that need.
257 */
258 ath6kl_credit_reduce(cred_info, curdist_list,
259 curdist_list->cred_assngd - need);
260
261 if (cred_info->cur_free_credits >=
262 ep_dist->seek_cred)
263 break;
264 }
265
266 if (curdist_list->endpoint == ENDPOINT_0)
267 break;
268 }
269
270 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
271
272 out:
273 /* did we find some credits? */
274 if (credits)
275 ath6kl_credit_deposit(cred_info, ep_dist, credits);
276
277 ep_dist->seek_cred = 0;
278 }
279
280 /* redistribute credits based on activity change */
281 static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
282 struct list_head *ep_dist_list)
283 {
284 struct htc_endpoint_credit_dist *curdist_list;
285
286 list_for_each_entry(curdist_list, ep_dist_list, list) {
287 if (curdist_list->endpoint == ENDPOINT_0)
288 continue;
289
290 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
291 (curdist_list->svc_id == WMI_DATA_BE_SVC))
292 curdist_list->dist_flags |= HTC_EP_ACTIVE;
293
294 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
295 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
296 if (curdist_list->txq_depth == 0)
297 ath6kl_credit_reduce(info, curdist_list, 0);
298 else
299 ath6kl_credit_reduce(info,
300 curdist_list,
301 curdist_list->cred_min);
302 }
303 }
304 }
305
306 /*
307 *
308 * This function is invoked whenever endpoints require credit
309 * distributions. A lock is held while this function is invoked, so it
310 * shall NOT block. The ep_dist_list is a list of distribution
311 * structures in prioritized order as defined by the call to the
312 * htc_set_credit_dist() API.
313 */
314 static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
315 struct list_head *ep_dist_list,
316 enum htc_credit_dist_reason reason)
317 {
318 switch (reason) {
319 case HTC_CREDIT_DIST_SEND_COMPLETE:
320 ath6kl_credit_update(cred_info, ep_dist_list);
321 break;
322 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
323 ath6kl_credit_redistribute(cred_info, ep_dist_list);
324 break;
325 default:
326 break;
327 }
328
329 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
330 WARN_ON(cred_info->cur_free_credits < 0);
331 }
332
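/*
 * Align the packet buffer down to a 4-byte boundary by moving the payload
 * towards lower addresses; callers are assumed to have reserved enough
 * headroom in front of the buffer for this shift (up to 3 bytes).
 */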
333 static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
334 {
335 u8 *align_addr;
336
337 if (!IS_ALIGNED((unsigned long) *buf, 4)) {
338 align_addr = PTR_ALIGN(*buf - 4, 4);
339 memmove(align_addr, *buf, len);
340 *buf = align_addr;
341 }
342 }
343
344 static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
345 int ctrl0, int ctrl1)
346 {
347 struct htc_frame_hdr *hdr;
348
349 packet->buf -= HTC_HDR_LENGTH;
350 hdr = (struct htc_frame_hdr *)packet->buf;
351
352 /* Endianness? */
353 put_unaligned((u16)packet->act_len, &hdr->payld_len);
354 hdr->flags = flags;
355 hdr->eid = packet->endpoint;
356 hdr->ctrl[0] = ctrl0;
357 hdr->ctrl[1] = ctrl1;
358 }
359
360 static void htc_reclaim_txctrl_buf(struct htc_target *target,
361 struct htc_packet *pkt)
362 {
363 spin_lock_bh(&target->htc_lock);
364 list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
365 spin_unlock_bh(&target->htc_lock);
366 }
367
368 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
369 bool tx)
370 {
371 struct htc_packet *packet = NULL;
372 struct list_head *buf_list;
373
374 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
375
376 spin_lock_bh(&target->htc_lock);
377
378 if (list_empty(buf_list)) {
379 spin_unlock_bh(&target->htc_lock);
380 return NULL;
381 }
382
383 packet = list_first_entry(buf_list, struct htc_packet, list);
384 list_del(&packet->list);
385 spin_unlock_bh(&target->htc_lock);
386
387 if (tx)
388 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
389
390 return packet;
391 }
392
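/*
 * Per-packet TX completion bookkeeping: on a failed send, the credits this
 * packet consumed are handed back to the distribution logic and the credit
 * distribution function is re-run (summary of the flow implemented below).
 */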
393 static void htc_tx_comp_update(struct htc_target *target,
394 struct htc_endpoint *endpoint,
395 struct htc_packet *packet)
396 {
397 packet->completion = NULL;
398 packet->buf += HTC_HDR_LENGTH;
399
400 if (!packet->status)
401 return;
402
403 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
404 packet->status, packet->endpoint, packet->act_len,
405 packet->info.tx.cred_used);
406
407 /* on failure to submit, reclaim credits for this packet */
408 spin_lock_bh(&target->tx_lock);
409 endpoint->cred_dist.cred_to_dist +=
410 packet->info.tx.cred_used;
411 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
412
413 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
414 target->credit_info, &target->cred_dist_list);
415
416 ath6kl_credit_distribute(target->credit_info,
417 &target->cred_dist_list,
418 HTC_CREDIT_DIST_SEND_COMPLETE);
419
420 spin_unlock_bh(&target->tx_lock);
421 }
422
423 static void htc_tx_complete(struct htc_endpoint *endpoint,
424 struct list_head *txq)
425 {
426 if (list_empty(txq))
427 return;
428
429 ath6kl_dbg(ATH6KL_DBG_HTC,
430 "htc tx complete ep %d pkts %d\n",
431 endpoint->eid, get_queue_depth(txq));
432
433 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
434 }
435
436 static void htc_tx_comp_handler(struct htc_target *target,
437 struct htc_packet *packet)
438 {
439 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
440 struct list_head container;
441
442 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
443 packet->info.tx.seqno);
444
445 htc_tx_comp_update(target, endpoint, packet);
446 INIT_LIST_HEAD(&container);
447 list_add_tail(&packet->list, &container);
448 /* do completion */
449 htc_tx_complete(endpoint, &container);
450 }
451
452 static void htc_async_tx_scat_complete(struct htc_target *target,
453 struct hif_scatter_req *scat_req)
454 {
455 struct htc_endpoint *endpoint;
456 struct htc_packet *packet;
457 struct list_head tx_compq;
458 int i;
459
460 INIT_LIST_HEAD(&tx_compq);
461
462 ath6kl_dbg(ATH6KL_DBG_HTC,
463 "htc tx scat complete len %d entries %d\n",
464 scat_req->len, scat_req->scat_entries);
465
466 if (scat_req->status)
467 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
468
469 packet = scat_req->scat_list[0].packet;
470 endpoint = &target->endpoint[packet->endpoint];
471
472 /* walk through the scatter list and process */
473 for (i = 0; i < scat_req->scat_entries; i++) {
474 packet = scat_req->scat_list[i].packet;
475 if (!packet) {
476 WARN_ON(1);
477 return;
478 }
479
480 packet->status = scat_req->status;
481 htc_tx_comp_update(target, endpoint, packet);
482 list_add_tail(&packet->list, &tx_compq);
483 }
484
485 /* free scatter request */
486 hif_scatter_req_add(target->dev->ar, scat_req);
487
488 /* complete all packets */
489 htc_tx_complete(endpoint, &tx_compq);
490 }
491
492 static int ath6kl_htc_tx_issue(struct htc_target *target,
493 struct htc_packet *packet)
494 {
495 int status;
496 bool sync = false;
497 u32 padded_len, send_len;
498
499 if (!packet->completion)
500 sync = true;
501
502 send_len = packet->act_len + HTC_HDR_LENGTH;
503
504 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
505
506 ath6kl_dbg(ATH6KL_DBG_HTC,
507 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
508 send_len, packet->info.tx.seqno, padded_len,
509 target->dev->ar->mbox_info.htc_addr,
510 sync ? "sync" : "async");
511
512 if (sync) {
513 status = hif_read_write_sync(target->dev->ar,
514 target->dev->ar->mbox_info.htc_addr,
515 packet->buf, padded_len,
516 HIF_WR_SYNC_BLOCK_INC);
517
518 packet->status = status;
519 packet->buf += HTC_HDR_LENGTH;
520 } else
521 status = hif_write_async(target->dev->ar,
522 target->dev->ar->mbox_info.htc_addr,
523 packet->buf, padded_len,
524 HIF_WR_ASYNC_BLOCK_INC, packet);
525
526 return status;
527 }
528
529 static int htc_check_credits(struct htc_target *target,
530 struct htc_endpoint *ep, u8 *flags,
531 enum htc_endpoint_id eid, unsigned int len,
532 int *req_cred)
533 {
534
535 *req_cred = (len > target->tgt_cred_sz) ?
536 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
537
538 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
539 *req_cred, ep->cred_dist.credits);
540
541 if (ep->cred_dist.credits < *req_cred) {
542 if (eid == ENDPOINT_0)
543 return -EINVAL;
544
545 /* Seek more credits */
546 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
547
548 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
549
550 ep->cred_dist.seek_cred = 0;
551
552 if (ep->cred_dist.credits < *req_cred) {
553 ath6kl_dbg(ATH6KL_DBG_CREDIT,
554 "credit not found for ep %d\n",
555 eid);
556 return -EINVAL;
557 }
558 }
559
560 ep->cred_dist.credits -= *req_cred;
561 ep->ep_st.cred_cosumd += *req_cred;
562
563 /* When we are getting low on credits, ask for more */
564 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
565 ep->cred_dist.seek_cred =
566 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
567
568 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
569
570 /* see if we were successful in getting more */
571 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
572 /* tell the target we need credits ASAP! */
573 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
574 ep->ep_st.cred_low_indicate += 1;
575 ath6kl_dbg(ATH6KL_DBG_CREDIT,
576 "credit we need credits asap\n");
577 }
578 }
579
580 return 0;
581 }
582
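/*
 * Pull packets off the endpoint TX queue onto the caller's queue for as long
 * as credits are available; each packet is stamped with the credits it
 * consumed, the async completion handler and the per-endpoint HTC sequence
 * number (summary of the loop below).
 */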
583 static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
584 struct htc_endpoint *endpoint,
585 struct list_head *queue)
586 {
587 int req_cred;
588 u8 flags;
589 struct htc_packet *packet;
590 unsigned int len;
591
592 while (true) {
593
594 flags = 0;
595
596 if (list_empty(&endpoint->txq))
597 break;
598 packet = list_first_entry(&endpoint->txq, struct htc_packet,
599 list);
600
601 ath6kl_dbg(ATH6KL_DBG_HTC,
602 "htc tx got packet 0x%p queue depth %d\n",
603 packet, get_queue_depth(&endpoint->txq));
604
605 len = CALC_TXRX_PADDED_LEN(target,
606 packet->act_len + HTC_HDR_LENGTH);
607
608 if (htc_check_credits(target, endpoint, &flags,
609 packet->endpoint, len, &req_cred))
610 break;
611
612 /* now we can fully move onto caller's queue */
613 packet = list_first_entry(&endpoint->txq, struct htc_packet,
614 list);
615 list_move_tail(&packet->list, queue);
616
617 /* save the number of credits this packet consumed */
618 packet->info.tx.cred_used = req_cred;
619
620 /* all TX packets are handled asynchronously */
621 packet->completion = htc_tx_comp_handler;
622 packet->context = target;
623 endpoint->ep_st.tx_issued += 1;
624
625 /* save send flags */
626 packet->info.tx.flags = flags;
627 packet->info.tx.seqno = endpoint->seqno;
628 endpoint->seqno++;
629 }
630 }
631
632 /* See if the padded tx length falls on a credit boundary */
633 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
634 struct htc_endpoint *ep)
635 {
636 int rem_cred, cred_pad;
637
638 rem_cred = *len % cred_sz;
639
640 /* No padding needed */
641 if (!rem_cred)
642 return 0;
643
644 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
645 return -1;
646
647 /*
648 * The transfer consumes a "partial" credit; this
649 * packet cannot be bundled unless we add
650 * additional "dummy" padding (max 255 bytes) to
651 * consume the entire credit.
652 */
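/*
 * Illustrative numbers (not from the source): with a 128-byte target
 * credit size, a 100-byte transfer is padded by 28 bytes so it consumes
 * one whole credit, while a 300-byte transfer gets rem_cred = 44 bytes
 * of dummy padding added by the computation below.
 */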
653 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
654
655 if ((cred_pad > 0) && (cred_pad <= 255))
656 *len += cred_pad;
657 else
658 /* The amount of padding is too large, send as non-bundled */
659 return -1;
660
661 return cred_pad;
662 }
663
664 static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
665 struct htc_endpoint *endpoint,
666 struct hif_scatter_req *scat_req,
667 int n_scat,
668 struct list_head *queue)
669 {
670 struct htc_packet *packet;
671 int i, len, rem_scat, cred_pad;
672 int status = 0;
673
674 rem_scat = target->max_tx_bndl_sz;
675
676 for (i = 0; i < n_scat; i++) {
677 scat_req->scat_list[i].packet = NULL;
678
679 if (list_empty(queue))
680 break;
681
682 packet = list_first_entry(queue, struct htc_packet, list);
683 len = CALC_TXRX_PADDED_LEN(target,
684 packet->act_len + HTC_HDR_LENGTH);
685
686 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
687 &len, endpoint);
688 if (cred_pad < 0 || rem_scat < len) {
689 status = -ENOSPC;
690 break;
691 }
692
693 rem_scat -= len;
694 /* now remove it from the queue */
695 list_del(&packet->list);
696
697 scat_req->scat_list[i].packet = packet;
698 /* prepare packet and flag message as part of a send bundle */
699 ath6kl_htc_tx_prep_pkt(packet,
700 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
701 cred_pad, packet->info.tx.seqno);
702 /* Make sure the buffer is 4-byte aligned */
703 ath6kl_htc_tx_buf_align(&packet->buf,
704 packet->act_len + HTC_HDR_LENGTH);
705 scat_req->scat_list[i].buf = packet->buf;
706 scat_req->scat_list[i].len = len;
707
708 scat_req->len += len;
709 scat_req->scat_entries++;
710 ath6kl_dbg(ATH6KL_DBG_HTC,
711 "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
712 i, packet, packet->info.tx.seqno, len, rem_scat);
713 }
714
715 /* Roll back scatter setup in case of any failure */
716 if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
717 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
718 packet = scat_req->scat_list[i].packet;
719 if (packet) {
720 packet->buf += HTC_HDR_LENGTH;
721 list_add(&packet->list, queue);
722 }
723 }
724 return -EAGAIN;
725 }
726
727 return status;
728 }
729
730 /*
731 * Drain a queue and send the packets as bundles. This function may return
732 * without fully draining the queue when:
733 *
734 * 1. scatter resources are exhausted
735 * 2. a message that will consume a partial credit will stop the
736 * bundling process early
737 * 3. we drop below the minimum number of messages for a bundle
738 */
739 static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
740 struct list_head *queue,
741 int *sent_bundle, int *n_bundle_pkts)
742 {
743 struct htc_target *target = endpoint->target;
744 struct hif_scatter_req *scat_req = NULL;
745 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
746 int status;
747
748 while (true) {
749 status = 0;
750 n_scat = get_queue_depth(queue);
751 n_scat = min(n_scat, target->msg_per_bndl_max);
752
753 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
754 /* not enough to bundle */
755 break;
756
757 scat_req = hif_scatter_req_get(target->dev->ar);
758
759 if (!scat_req) {
760 /* no scatter resources */
761 ath6kl_dbg(ATH6KL_DBG_HTC,
762 "htc tx no more scatter resources\n");
763 break;
764 }
765
766 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
767 n_scat);
768
769 scat_req->len = 0;
770 scat_req->scat_entries = 0;
771
772 status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
773 scat_req, n_scat,
774 queue);
775 if (status == -EAGAIN) {
776 hif_scatter_req_add(target->dev->ar, scat_req);
777 break;
778 }
779
780 /* send path is always asynchronous */
781 scat_req->complete = htc_async_tx_scat_complete;
782 n_sent_bundle++;
783 tot_pkts_bundle += scat_req->scat_entries;
784
785 ath6kl_dbg(ATH6KL_DBG_HTC,
786 "htc tx scatter bytes %d entries %d\n",
787 scat_req->len, scat_req->scat_entries);
788 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
789
790 if (status)
791 break;
792 }
793
794 *sent_bundle = n_sent_bundle;
795 *n_bundle_pkts = tot_pkts_bundle;
796 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
797 n_sent_bundle);
798
799 return;
800 }
801
802 static void ath6kl_htc_tx_from_queue(struct htc_target *target,
803 struct htc_endpoint *endpoint)
804 {
805 struct list_head txq;
806 struct htc_packet *packet;
807 int bundle_sent;
808 int n_pkts_bundle;
809
810 spin_lock_bh(&target->tx_lock);
811
812 endpoint->tx_proc_cnt++;
813 if (endpoint->tx_proc_cnt > 1) {
814 endpoint->tx_proc_cnt--;
815 spin_unlock_bh(&target->tx_lock);
816 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
817 return;
818 }
819
820 /*
821 * drain the endpoint TX queue for transmission as long
822 * as we have enough credits.
823 */
824 INIT_LIST_HEAD(&txq);
825
826 while (true) {
827
828 if (list_empty(&endpoint->txq))
829 break;
830
831 ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
832
833 if (list_empty(&txq))
834 break;
835
836 spin_unlock_bh(&target->tx_lock);
837
838 bundle_sent = 0;
839 n_pkts_bundle = 0;
840
841 while (true) {
842 /* try to send a bundle on each pass */
843 if ((target->tx_bndl_enable) &&
844 (get_queue_depth(&txq) >=
845 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
846 int temp1 = 0, temp2 = 0;
847
848 ath6kl_htc_tx_bundle(endpoint, &txq,
849 &temp1, &temp2);
850 bundle_sent += temp1;
851 n_pkts_bundle += temp2;
852 }
853
854 if (list_empty(&txq))
855 break;
856
857 packet = list_first_entry(&txq, struct htc_packet,
858 list);
859 list_del(&packet->list);
860
861 ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
862 0, packet->info.tx.seqno);
863 ath6kl_htc_tx_issue(target, packet);
864 }
865
866 spin_lock_bh(&target->tx_lock);
867
868 endpoint->ep_st.tx_bundles += bundle_sent;
869 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
870 }
871
872 endpoint->tx_proc_cnt = 0;
873 spin_unlock_bh(&target->tx_lock);
874 }
875
876 static bool ath6kl_htc_tx_try(struct htc_target *target,
877 struct htc_endpoint *endpoint,
878 struct htc_packet *tx_pkt)
879 {
880 struct htc_ep_callbacks ep_cb;
881 int txq_depth;
882 bool overflow = false;
883
884 ep_cb = endpoint->ep_cb;
885
886 spin_lock_bh(&target->tx_lock);
887 txq_depth = get_queue_depth(&endpoint->txq);
888 spin_unlock_bh(&target->tx_lock);
889
890 if (txq_depth >= endpoint->max_txq_depth)
891 overflow = true;
892
893 if (overflow)
894 ath6kl_dbg(ATH6KL_DBG_HTC,
895 "htc tx overflow ep %d depth %d max %d\n",
896 endpoint->eid, txq_depth,
897 endpoint->max_txq_depth);
898
899 if (overflow && ep_cb.tx_full) {
900 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
901 HTC_SEND_FULL_DROP) {
902 endpoint->ep_st.tx_dropped += 1;
903 return false;
904 }
905 }
906
907 spin_lock_bh(&target->tx_lock);
908 list_add_tail(&tx_pkt->list, &endpoint->txq);
909 spin_unlock_bh(&target->tx_lock);
910
911 ath6kl_htc_tx_from_queue(target, endpoint);
912
913 return true;
914 }
915
916 static void htc_chk_ep_txq(struct htc_target *target)
917 {
918 struct htc_endpoint *endpoint;
919 struct htc_endpoint_credit_dist *cred_dist;
920
921 /*
922 * Run through the credit distribution list to see if there are
923 * packets queued. NOTE: no locks need to be taken since the
924 * distribution list is not dynamic (cannot be re-ordered) and we
925 * are not modifying any state.
926 */
927 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
928 endpoint = cred_dist->htc_ep;
929
930 spin_lock_bh(&target->tx_lock);
931 if (!list_empty(&endpoint->txq)) {
932 ath6kl_dbg(ATH6KL_DBG_HTC,
933 "htc creds ep %d credits %d pkts %d\n",
934 cred_dist->endpoint,
935 endpoint->cred_dist.credits,
936 get_queue_depth(&endpoint->txq));
937 spin_unlock_bh(&target->tx_lock);
938 /*
939 * Try to start the stalled queue, this list is
940 * ordered by priority. If there are credits
941 * available the highest priority queue will get a
942 * chance to reclaim credits from lower priority
943 * ones.
944 */
945 ath6kl_htc_tx_from_queue(target, endpoint);
946 spin_lock_bh(&target->tx_lock);
947 }
948 spin_unlock_bh(&target->tx_lock);
949 }
950 }
951
952 static int htc_setup_tx_complete(struct htc_target *target)
953 {
954 struct htc_packet *send_pkt = NULL;
955 int status;
956
957 send_pkt = htc_get_control_buf(target, true);
958
959 if (!send_pkt)
960 return -ENOMEM;
961
962 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
963 struct htc_setup_comp_ext_msg *setup_comp_ext;
964 u32 flags = 0;
965
966 setup_comp_ext =
967 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
968 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
969 setup_comp_ext->msg_id =
970 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
971
972 if (target->msg_per_bndl_max > 0) {
973 /* Indicate HTC bundling to the target */
974 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
975 setup_comp_ext->msg_per_rxbndl =
976 target->msg_per_bndl_max;
977 }
978
979 memcpy(&setup_comp_ext->flags, &flags,
980 sizeof(setup_comp_ext->flags));
981 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
982 sizeof(struct htc_setup_comp_ext_msg),
983 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
984
985 } else {
986 struct htc_setup_comp_msg *setup_comp;
987 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
988 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
989 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
990 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
991 sizeof(struct htc_setup_comp_msg),
992 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
993 }
994
995 /* we want synchronous operation */
996 send_pkt->completion = NULL;
997 ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
998 status = ath6kl_htc_tx_issue(target, send_pkt);
999
1000 if (send_pkt != NULL)
1001 htc_reclaim_txctrl_buf(target, send_pkt);
1002
1003 return status;
1004 }
1005
1006 void ath6kl_htc_set_credit_dist(struct htc_target *target,
1007 struct ath6kl_htc_credit_info *credit_info,
1008 u16 srvc_pri_order[], int list_len)
1009 {
1010 struct htc_endpoint *endpoint;
1011 int i, ep;
1012
1013 target->credit_info = credit_info;
1014
1015 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
1016 &target->cred_dist_list);
1017
1018 for (i = 0; i < list_len; i++) {
1019 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
1020 endpoint = &target->endpoint[ep];
1021 if (endpoint->svc_id == srvc_pri_order[i]) {
1022 list_add_tail(&endpoint->cred_dist.list,
1023 &target->cred_dist_list);
1024 break;
1025 }
1026 }
1027 if (ep >= ENDPOINT_MAX) {
1028 WARN_ON(1);
1029 return;
1030 }
1031 }
1032 }
1033
1034 int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
1035 {
1036 struct htc_endpoint *endpoint;
1037 struct list_head queue;
1038
1039 ath6kl_dbg(ATH6KL_DBG_HTC,
1040 "htc tx ep id %d buf 0x%p len %d\n",
1041 packet->endpoint, packet->buf, packet->act_len);
1042
1043 if (packet->endpoint >= ENDPOINT_MAX) {
1044 WARN_ON(1);
1045 return -EINVAL;
1046 }
1047
1048 endpoint = &target->endpoint[packet->endpoint];
1049
1050 if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
1051 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
1052 -ECANCELED : -ENOSPC;
1053 INIT_LIST_HEAD(&queue);
1054 list_add(&packet->list, &queue);
1055 htc_tx_complete(endpoint, &queue);
1056 }
1057
1058 return 0;
1059 }
1060
1061 /* flush endpoint TX queue */
1062 void ath6kl_htc_flush_txep(struct htc_target *target,
1063 enum htc_endpoint_id eid, u16 tag)
1064 {
1065 struct htc_packet *packet, *tmp_pkt;
1066 struct list_head discard_q, container;
1067 struct htc_endpoint *endpoint = &target->endpoint[eid];
1068
1069 if (!endpoint->svc_id) {
1070 WARN_ON(1);
1071 return;
1072 }
1073
1074 /* initialize the discard queue */
1075 INIT_LIST_HEAD(&discard_q);
1076
1077 spin_lock_bh(&target->tx_lock);
1078
1079 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
1080 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
1081 (tag == packet->info.tx.tag))
1082 list_move_tail(&packet->list, &discard_q);
1083 }
1084
1085 spin_unlock_bh(&target->tx_lock);
1086
1087 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
1088 packet->status = -ECANCELED;
1089 list_del(&packet->list);
1090 ath6kl_dbg(ATH6KL_DBG_HTC,
1091 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
1092 packet, packet->act_len,
1093 packet->endpoint, packet->info.tx.tag);
1094
1095 INIT_LIST_HEAD(&container);
1096 list_add_tail(&packet->list, &container);
1097 htc_tx_complete(endpoint, &container);
1098 }
1099
1100 }
1101
1102 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1103 {
1104 struct htc_endpoint *endpoint;
1105 int i;
1106
1107 dump_cred_dist_stats(target);
1108
1109 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1110 endpoint = &target->endpoint[i];
1111 if (endpoint->svc_id == 0)
1112 /* not in use.. */
1113 continue;
1114 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1115 }
1116 }
1117
1118 void ath6kl_htc_indicate_activity_change(struct htc_target *target,
1119 enum htc_endpoint_id eid, bool active)
1120 {
1121 struct htc_endpoint *endpoint = &target->endpoint[eid];
1122 bool dist = false;
1123
1124 if (endpoint->svc_id == 0) {
1125 WARN_ON(1);
1126 return;
1127 }
1128
1129 spin_lock_bh(&target->tx_lock);
1130
1131 if (active) {
1132 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
1133 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
1134 dist = true;
1135 }
1136 } else {
1137 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
1138 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
1139 dist = true;
1140 }
1141 }
1142
1143 if (dist) {
1144 endpoint->cred_dist.txq_depth =
1145 get_queue_depth(&endpoint->txq);
1146
1147 ath6kl_dbg(ATH6KL_DBG_HTC,
1148 "htc tx activity ctxt 0x%p dist 0x%p\n",
1149 target->credit_info, &target->cred_dist_list);
1150
1151 ath6kl_credit_distribute(target->credit_info,
1152 &target->cred_dist_list,
1153 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
1154 }
1155
1156 spin_unlock_bh(&target->tx_lock);
1157
1158 if (dist && !active)
1159 htc_chk_ep_txq(target);
1160 }
1161
1162 /* HTC Rx */
1163
1164 static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
1165 int n_look_ahds)
1166 {
1167 endpoint->ep_st.rx_pkts++;
1168 if (n_look_ahds == 1)
1169 endpoint->ep_st.rx_lkahds++;
1170 else if (n_look_ahds > 1)
1171 endpoint->ep_st.rx_bundle_lkahd++;
1172 }
1173
1174 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
1175 enum htc_endpoint_id eid, int len)
1176 {
1177 return (eid == target->dev->ar->ctrl_ep) ?
1178 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
1179 }
1180
1181 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1182 {
1183 struct list_head queue;
1184
1185 INIT_LIST_HEAD(&queue);
1186 list_add_tail(&packet->list, &queue);
1187 return ath6kl_htc_add_rxbuf_multiple(target, &queue);
1188 }
1189
1190 static void htc_reclaim_rxbuf(struct htc_target *target,
1191 struct htc_packet *packet,
1192 struct htc_endpoint *ep)
1193 {
1194 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
1195 htc_rxpkt_reset(packet);
1196 packet->status = -ECANCELED;
1197 ep->ep_cb.rx(ep->target, packet);
1198 } else {
1199 htc_rxpkt_reset(packet);
1200 htc_add_rxbuf((void *)(target), packet);
1201 }
1202 }
1203
1204 static void reclaim_rx_ctrl_buf(struct htc_target *target,
1205 struct htc_packet *packet)
1206 {
1207 spin_lock_bh(&target->htc_lock);
1208 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
1209 spin_unlock_bh(&target->htc_lock);
1210 }
1211
1212 static int ath6kl_htc_rx_packet(struct htc_target *target,
1213 struct htc_packet *packet,
1214 u32 rx_len)
1215 {
1216 struct ath6kl_device *dev = target->dev;
1217 u32 padded_len;
1218 int status;
1219
1220 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
1221
1222 if (padded_len > packet->buf_len) {
1223 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
1224 padded_len, rx_len, packet->buf_len);
1225 return -ENOMEM;
1226 }
1227
1228 ath6kl_dbg(ATH6KL_DBG_HTC,
1229 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
1230 packet, packet->info.rx.exp_hdr,
1231 padded_len, dev->ar->mbox_info.htc_addr);
1232
1233 status = hif_read_write_sync(dev->ar,
1234 dev->ar->mbox_info.htc_addr,
1235 packet->buf, padded_len,
1236 HIF_RD_SYNC_BLOCK_FIX);
1237
1238 packet->status = status;
1239
1240 return status;
1241 }
1242
1243 /*
1244 * optimization for recv packets, we can indicate a
1245 * "hint" that there are more single-packets to fetch
1246 * on this endpoint.
1247 */
1248 static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
1249 struct htc_endpoint *endpoint,
1250 struct htc_packet *packet)
1251 {
1252 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
1253
1254 if (htc_hdr->eid == packet->endpoint) {
1255 if (!list_empty(&endpoint->rx_bufq))
1256 packet->info.rx.indicat_flags |=
1257 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1258 }
1259 }
1260
1261 static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
1262 {
1263 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
1264
1265 if (ep_cb.rx_refill_thresh > 0) {
1266 spin_lock_bh(&endpoint->target->rx_lock);
1267 if (get_queue_depth(&endpoint->rx_bufq)
1268 < ep_cb.rx_refill_thresh) {
1269 spin_unlock_bh(&endpoint->target->rx_lock);
1270 ep_cb.rx_refill(endpoint->target, endpoint->eid);
1271 return;
1272 }
1273 spin_unlock_bh(&endpoint->target->rx_lock);
1274 }
1275 }
1276
1277 /* This function is called with rx_lock held */
1278 static int ath6kl_htc_rx_setup(struct htc_target *target,
1279 struct htc_endpoint *ep,
1280 u32 *lk_ahds, struct list_head *queue, int n_msg)
1281 {
1282 struct htc_packet *packet;
1283 /* FIXME: type of lk_ahds can't be right */
1284 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
1285 struct htc_ep_callbacks ep_cb;
1286 int status = 0, j, full_len;
1287 bool no_recycle;
1288
1289 full_len = CALC_TXRX_PADDED_LEN(target,
1290 le16_to_cpu(htc_hdr->payld_len) +
1291 sizeof(*htc_hdr));
1292
1293 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
1294 ath6kl_warn("Rx buffer requested with invalid length\n");
1295 return -EINVAL;
1296 }
1297
1298 ep_cb = ep->ep_cb;
1299 for (j = 0; j < n_msg; j++) {
1300
1301 /*
1302 * Reset the flag; any packets allocated using the
1303 * rx_alloc() API cannot be recycled on
1304 * cleanup; they must be explicitly returned.
1305 */
1306 no_recycle = false;
1307
1308 if (ep_cb.rx_allocthresh &&
1309 (full_len > ep_cb.rx_alloc_thresh)) {
1310 ep->ep_st.rx_alloc_thresh_hit += 1;
1311 ep->ep_st.rxalloc_thresh_byte +=
1312 le16_to_cpu(htc_hdr->payld_len);
1313
1314 spin_unlock_bh(&target->rx_lock);
1315 no_recycle = true;
1316
1317 packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
1318 full_len);
1319 spin_lock_bh(&target->rx_lock);
1320 } else {
1321 /* refill handler is being used */
1322 if (list_empty(&ep->rx_bufq)) {
1323 if (ep_cb.rx_refill) {
1324 spin_unlock_bh(&target->rx_lock);
1325 ep_cb.rx_refill(ep->target, ep->eid);
1326 spin_lock_bh(&target->rx_lock);
1327 }
1328 }
1329
1330 if (list_empty(&ep->rx_bufq))
1331 packet = NULL;
1332 else {
1333 packet = list_first_entry(&ep->rx_bufq,
1334 struct htc_packet, list);
1335 list_del(&packet->list);
1336 }
1337 }
1338
1339 if (!packet) {
1340 target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
1341 target->ep_waiting = ep->eid;
1342 return -ENOSPC;
1343 }
1344
1345 /* clear flags */
1346 packet->info.rx.rx_flags = 0;
1347 packet->info.rx.indicat_flags = 0;
1348 packet->status = 0;
1349
1350 if (no_recycle)
1351 /*
1352 * flag that these packets cannot be
1353 * recycled, they have to be returned to
1354 * the user
1355 */
1356 packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
1357
1358 /* Caller needs to free this upon any failure */
1359 list_add_tail(&packet->list, queue);
1360
1361 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1362 status = -ECANCELED;
1363 break;
1364 }
1365
1366 if (j) {
1367 packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
1368 packet->info.rx.exp_hdr = 0xFFFFFFFF;
1369 } else
1370 /* set expected look ahead */
1371 packet->info.rx.exp_hdr = *lk_ahds;
1372
1373 packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
1374 HTC_HDR_LENGTH;
1375 }
1376
1377 return status;
1378 }
1379
1380 static int ath6kl_htc_rx_alloc(struct htc_target *target,
1381 u32 lk_ahds[], int msg,
1382 struct htc_endpoint *endpoint,
1383 struct list_head *queue)
1384 {
1385 int status = 0;
1386 struct htc_packet *packet, *tmp_pkt;
1387 struct htc_frame_hdr *htc_hdr;
1388 int i, n_msg;
1389
1390 spin_lock_bh(&target->rx_lock);
1391
1392 for (i = 0; i < msg; i++) {
1393
1394 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1395
1396 if (htc_hdr->eid >= ENDPOINT_MAX) {
1397 ath6kl_err("invalid ep in look-ahead: %d\n",
1398 htc_hdr->eid);
1399 status = -ENOMEM;
1400 break;
1401 }
1402
1403 if (htc_hdr->eid != endpoint->eid) {
1404 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1405 htc_hdr->eid, endpoint->eid, i);
1406 status = -ENOMEM;
1407 break;
1408 }
1409
1410 if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1411 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1412 htc_hdr->payld_len,
1413 (u32) HTC_MAX_PAYLOAD_LENGTH);
1414 status = -ENOMEM;
1415 break;
1416 }
1417
1418 if (endpoint->svc_id == 0) {
1419 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1420 status = -ENOMEM;
1421 break;
1422 }
1423
1424 if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1425 /*
1426 * HTC header indicates that every packet to follow
1427 * has the same padded length so that it can be
1428 * optimally fetched as a full bundle.
1429 */
1430 n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1431 HTC_FLG_RX_BNDL_CNT_S;
1432
1433 /* the count doesn't include the starter frame */
1434 n_msg++;
1435 if (n_msg > target->msg_per_bndl_max) {
1436 status = -ENOMEM;
1437 break;
1438 }
1439
1440 endpoint->ep_st.rx_bundle_from_hdr += 1;
1441 ath6kl_dbg(ATH6KL_DBG_HTC,
1442 "htc rx bundle pkts %d\n",
1443 n_msg);
1444 } else
1445 /* HTC header only indicates 1 message to fetch */
1446 n_msg = 1;
1447
1448 /* Setup packet buffers for each message */
1449 status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
1450 queue, n_msg);
1451
1452 /*
1453 * This is due to unavailability of buffers to receive all the data.
1454 * Return no error so that free buffers from the queue can be used
1455 * to receive partial data.
1456 */
1457 if (status == -ENOSPC) {
1458 spin_unlock_bh(&target->rx_lock);
1459 return 0;
1460 }
1461
1462 if (status)
1463 break;
1464 }
1465
1466 spin_unlock_bh(&target->rx_lock);
1467
1468 if (status) {
1469 list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1470 list_del(&packet->list);
1471 htc_reclaim_rxbuf(target, packet,
1472 &target->endpoint[packet->endpoint]);
1473 }
1474 }
1475
1476 return status;
1477 }
1478
1479 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1480 {
1481 if (packets->endpoint != ENDPOINT_0) {
1482 WARN_ON(1);
1483 return;
1484 }
1485
1486 if (packets->status == -ECANCELED) {
1487 reclaim_rx_ctrl_buf(context, packets);
1488 return;
1489 }
1490
1491 if (packets->act_len > 0) {
1492 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1493 packets->act_len + HTC_HDR_LENGTH);
1494
1495 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1496 "htc rx unexpected endpoint 0 message", "",
1497 packets->buf - HTC_HDR_LENGTH,
1498 packets->act_len + HTC_HDR_LENGTH);
1499 }
1500
1501 htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1502 }
1503
1504 static void htc_proc_cred_rpt(struct htc_target *target,
1505 struct htc_credit_report *rpt,
1506 int n_entries,
1507 enum htc_endpoint_id from_ep)
1508 {
1509 struct htc_endpoint *endpoint;
1510 int tot_credits = 0, i;
1511 bool dist = false;
1512
1513 spin_lock_bh(&target->tx_lock);
1514
1515 for (i = 0; i < n_entries; i++, rpt++) {
1516 if (rpt->eid >= ENDPOINT_MAX) {
1517 WARN_ON(1);
1518 spin_unlock_bh(&target->tx_lock);
1519 return;
1520 }
1521
1522 endpoint = &target->endpoint[rpt->eid];
1523
1524 ath6kl_dbg(ATH6KL_DBG_CREDIT,
1525 "credit report ep %d credits %d\n",
1526 rpt->eid, rpt->credits);
1527
1528 endpoint->ep_st.tx_cred_rpt += 1;
1529 endpoint->ep_st.cred_retnd += rpt->credits;
1530
1531 if (from_ep == rpt->eid) {
1532 /*
1533 * This credit report arrived on the same endpoint
1534 * indicating it arrived in an RX packet.
1535 */
1536 endpoint->ep_st.cred_from_rx += rpt->credits;
1537 endpoint->ep_st.cred_rpt_from_rx += 1;
1538 } else if (from_ep == ENDPOINT_0) {
1539 /* credit arrived on endpoint 0 as a NULL message */
1540 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1541 endpoint->ep_st.cred_rpt_ep0 += 1;
1542 } else {
1543 endpoint->ep_st.cred_from_other += rpt->credits;
1544 endpoint->ep_st.cred_rpt_from_other += 1;
1545 }
1546
1547 if (rpt->eid == ENDPOINT_0)
1548 /* always give endpoint 0 credits back */
1549 endpoint->cred_dist.credits += rpt->credits;
1550 else {
1551 endpoint->cred_dist.cred_to_dist += rpt->credits;
1552 dist = true;
1553 }
1554
1555 /*
1556 * Refresh the tx depth for the distribution function that will
1557 * recover these credits. NOTE: this is only valid when
1558 * there are credits to recover!
1559 */
1560 endpoint->cred_dist.txq_depth =
1561 get_queue_depth(&endpoint->txq);
1562
1563 tot_credits += rpt->credits;
1564 }
1565
1566 if (dist) {
1567 /*
1568 * This was a credit return based on a completed send
1569 * operation. Note that this is done with the lock held.
1570 */
1571 ath6kl_credit_distribute(target->credit_info,
1572 &target->cred_dist_list,
1573 HTC_CREDIT_DIST_SEND_COMPLETE);
1574 }
1575
1576 spin_unlock_bh(&target->tx_lock);
1577
1578 if (tot_credits)
1579 htc_chk_ep_txq(target);
1580 }
1581
1582 static int htc_parse_trailer(struct htc_target *target,
1583 struct htc_record_hdr *record,
1584 u8 *record_buf, u32 *next_lk_ahds,
1585 enum htc_endpoint_id endpoint,
1586 int *n_lk_ahds)
1587 {
1588 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1589 struct htc_lookahead_report *lk_ahd;
1590 int len;
1591
1592 switch (record->rec_id) {
1593 case HTC_RECORD_CREDITS:
1594 len = record->len / sizeof(struct htc_credit_report);
1595 if (!len) {
1596 WARN_ON(1);
1597 return -EINVAL;
1598 }
1599
1600 htc_proc_cred_rpt(target,
1601 (struct htc_credit_report *) record_buf,
1602 len, endpoint);
1603 break;
1604 case HTC_RECORD_LOOKAHEAD:
1605 len = record->len / sizeof(*lk_ahd);
1606 if (!len) {
1607 WARN_ON(1);
1608 return -EINVAL;
1609 }
1610
1611 lk_ahd = (struct htc_lookahead_report *) record_buf;
1612 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1613 && next_lk_ahds) {
1614
1615 ath6kl_dbg(ATH6KL_DBG_HTC,
1616 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1617 lk_ahd->pre_valid, lk_ahd->post_valid);
1618
1619 /* look ahead bytes are valid, copy them over */
1620 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1621
1622 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1623 "htc rx next look ahead",
1624 "", next_lk_ahds, 4);
1625
1626 *n_lk_ahds = 1;
1627 }
1628 break;
1629 case HTC_RECORD_LOOKAHEAD_BUNDLE:
1630 len = record->len / sizeof(*bundle_lkahd_rpt);
1631 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1632 WARN_ON(1);
1633 return -EINVAL;
1634 }
1635
1636 if (next_lk_ahds) {
1637 int i;
1638
1639 bundle_lkahd_rpt =
1640 (struct htc_bundle_lkahd_rpt *) record_buf;
1641
1642 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
1643 "", record_buf, record->len);
1644
1645 for (i = 0; i < len; i++) {
1646 memcpy((u8 *)&next_lk_ahds[i],
1647 bundle_lkahd_rpt->lk_ahd, 4);
1648 bundle_lkahd_rpt++;
1649 }
1650
1651 *n_lk_ahds = i;
1652 }
1653 break;
1654 default:
1655 ath6kl_err("unhandled record: id:%d len:%d\n",
1656 record->rec_id, record->len);
1657 break;
1658 }
1659
1660 return 0;
1661
1662 }
1663
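/*
 * Trailer layout as parsed below: a sequence of byte-aligned records, each
 * made up of an htc_record_hdr (rec_id, len) immediately followed by len
 * bytes of record payload, packed back to back until the trailer length is
 * exhausted.
 */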
1664 static int htc_proc_trailer(struct htc_target *target,
1665 u8 *buf, int len, u32 *next_lk_ahds,
1666 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1667 {
1668 struct htc_record_hdr *record;
1669 int orig_len;
1670 int status;
1671 u8 *record_buf;
1672 u8 *orig_buf;
1673
1674 ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1675 ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1676
1677 orig_buf = buf;
1678 orig_len = len;
1679 status = 0;
1680
1681 while (len > 0) {
1682
1683 if (len < sizeof(struct htc_record_hdr)) {
1684 status = -ENOMEM;
1685 break;
1686 }
1687 /* these are byte aligned structs */
1688 record = (struct htc_record_hdr *) buf;
1689 len -= sizeof(struct htc_record_hdr);
1690 buf += sizeof(struct htc_record_hdr);
1691
1692 if (record->len > len) {
1693 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1694 record->len, record->rec_id, len);
1695 status = -ENOMEM;
1696 break;
1697 }
1698 record_buf = buf;
1699
1700 status = htc_parse_trailer(target, record, record_buf,
1701 next_lk_ahds, endpoint, n_lk_ahds);
1702
1703 if (status)
1704 break;
1705
1706 /* advance buffer past this record for next time around */
1707 buf += record->len;
1708 len -= record->len;
1709 }
1710
1711 if (status)
1712 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1713 "", orig_buf, orig_len);
1714
1715 return status;
1716 }
1717
1718 static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1719 struct htc_packet *packet,
1720 u32 *next_lkahds, int *n_lkahds)
1721 {
1722 int status = 0;
1723 u16 payload_len;
1724 u32 lk_ahd;
1725 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1726
1727 if (n_lkahds != NULL)
1728 *n_lkahds = 0;
1729
1730 /*
1731 * NOTE: we cannot assume the alignment of buf, so we use the safe
1732 * macros to retrieve 16 bit fields.
1733 */
1734 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1735
1736 memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1737
1738 if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1739 /*
1740 * Refresh the expected header and the actual length as it
1741 * was unknown when this packet was grabbed as part of the
1742 * bundle.
1743 */
1744 packet->info.rx.exp_hdr = lk_ahd;
1745 packet->act_len = payload_len + HTC_HDR_LENGTH;
1746
1747 /* validate the actual header that was refreshed */
1748 if (packet->act_len > packet->buf_len) {
1749 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1750 payload_len, lk_ahd);
1751 /*
1752 * Limit this to max buffer just to print out some
1753 * of the buffer.
1754 */
1755 packet->act_len = min(packet->act_len, packet->buf_len);
1756 status = -ENOMEM;
1757 goto fail_rx;
1758 }
1759
1760 if (packet->endpoint != htc_hdr->eid) {
1761 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1762 htc_hdr->eid, packet->endpoint);
1763 status = -ENOMEM;
1764 goto fail_rx;
1765 }
1766 }
1767
1768 if (lk_ahd != packet->info.rx.exp_hdr) {
1769 ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1770 __func__, packet, packet->info.rx.rx_flags);
1771 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
1772 "", &packet->info.rx.exp_hdr, 4);
1773 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
1774 "", (u8 *)&lk_ahd, sizeof(lk_ahd));
1775 status = -ENOMEM;
1776 goto fail_rx;
1777 }
1778
1779 if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1780 if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1781 htc_hdr->ctrl[0] > payload_len) {
1782 ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1783 __func__, payload_len, htc_hdr->ctrl[0]);
1784 status = -ENOMEM;
1785 goto fail_rx;
1786 }
1787
1788 if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1789 next_lkahds = NULL;
1790 n_lkahds = NULL;
1791 }
1792
1793 status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1794 + payload_len - htc_hdr->ctrl[0],
1795 htc_hdr->ctrl[0], next_lkahds,
1796 n_lkahds, packet->endpoint);
1797
1798 if (status)
1799 goto fail_rx;
1800
1801 packet->act_len -= htc_hdr->ctrl[0];
1802 }
1803
1804 packet->buf += HTC_HDR_LENGTH;
1805 packet->act_len -= HTC_HDR_LENGTH;
1806
1807 fail_rx:
1808 if (status)
1809 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
1810 "", packet->buf, packet->act_len);
1811
1812 return status;
1813 }
1814
1815 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1816 struct htc_packet *packet)
1817 {
1818 ath6kl_dbg(ATH6KL_DBG_HTC,
1819 "htc rx complete ep %d packet 0x%p\n",
1820 endpoint->eid, packet);
1821 endpoint->ep_cb.rx(endpoint->target, packet);
1822 }
1823
1824 static int ath6kl_htc_rx_bundle(struct htc_target *target,
1825 struct list_head *rxq,
1826 struct list_head *sync_compq,
1827 int *n_pkt_fetched, bool part_bundle)
1828 {
1829 struct hif_scatter_req *scat_req;
1830 struct htc_packet *packet;
1831 int rem_space = target->max_rx_bndl_sz;
1832 int n_scat_pkt, status = 0, i, len;
1833
1834 n_scat_pkt = get_queue_depth(rxq);
1835 n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1836
1837 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1838 /*
1839 * We were forced to split this bundle receive operation;
1840 * all packets in this partial bundle must have their
1841 * lookaheads ignored.
1842 */
1843 part_bundle = true;
1844
1845 /*
1846 * This would only happen if the target ignored our max
1847 * bundle limit.
1848 */
1849 ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
1850 __func__, get_queue_depth(rxq), n_scat_pkt);
1851 }
1852
1853 len = 0;
1854
1855 ath6kl_dbg(ATH6KL_DBG_HTC,
1856 "htc rx bundle depth %d pkts %d\n",
1857 get_queue_depth(rxq), n_scat_pkt);
1858
1859 scat_req = hif_scatter_req_get(target->dev->ar);
1860
1861 if (scat_req == NULL)
1862 goto fail_rx_pkt;
1863
1864 for (i = 0; i < n_scat_pkt; i++) {
1865 int pad_len;
1866
1867 packet = list_first_entry(rxq, struct htc_packet, list);
1868 list_del(&packet->list);
1869
1870 pad_len = CALC_TXRX_PADDED_LEN(target,
1871 packet->act_len);
1872
1873 if ((rem_space - pad_len) < 0) {
1874 list_add(&packet->list, rxq);
1875 break;
1876 }
1877
1878 rem_space -= pad_len;
1879
1880 if (part_bundle || (i < (n_scat_pkt - 1)))
1881 /*
1882 * Packets 0..n-1 cannot be checked for look-aheads
1883 * since we are fetching a bundle; the last packet,
1884 * however, can have its lookahead used.
1885 */
1886 packet->info.rx.rx_flags |=
1887 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1888
1889 /* NOTE: 1 HTC packet per scatter entry */
1890 scat_req->scat_list[i].buf = packet->buf;
1891 scat_req->scat_list[i].len = pad_len;
1892
1893 packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1894
1895 list_add_tail(&packet->list, sync_compq);
1896
1897 WARN_ON(!scat_req->scat_list[i].len);
1898 len += scat_req->scat_list[i].len;
1899 }
1900
1901 scat_req->len = len;
1902 scat_req->scat_entries = i;
1903
1904 status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
1905
1906 if (!status)
1907 *n_pkt_fetched = i;
1908
1909 /* free scatter request */
1910 hif_scatter_req_add(target->dev->ar, scat_req);
1911
1912 fail_rx_pkt:
1913
1914 return status;
1915 }
1916
1917 static int ath6kl_htc_rx_process_packets(struct htc_target *target,
1918 struct list_head *comp_pktq,
1919 u32 lk_ahds[],
1920 int *n_lk_ahd)
1921 {
1922 struct htc_packet *packet, *tmp_pkt;
1923 struct htc_endpoint *ep;
1924 int status = 0;
1925
1926 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1927 ep = &target->endpoint[packet->endpoint];
1928
1929 /* process header for each of the recv packet */
1930 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
1931 n_lk_ahd);
1932 if (status)
1933 return status;
1934
1935 list_del(&packet->list);
1936
1937 if (list_empty(comp_pktq)) {
1938 /*
1939 * Last packet's more packet flag is set
1940 * based on the lookahead.
1941 */
1942 if (*n_lk_ahd > 0)
1943 ath6kl_htc_rx_set_indicate(lk_ahds[0],
1944 ep, packet);
1945 } else
1946 /*
1947 * Packets in a bundle automatically have
1948 * this flag set.
1949 */
1950 packet->info.rx.indicat_flags |=
1951 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1952
1953 ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
1954
1955 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
1956 ep->ep_st.rx_bundl += 1;
1957
1958 ath6kl_htc_rx_complete(ep, packet);
1959 }
1960
1961 return status;
1962 }
1963
1964 static int ath6kl_htc_rx_fetch(struct htc_target *target,
1965 struct list_head *rx_pktq,
1966 struct list_head *comp_pktq)
1967 {
1968 int fetched_pkts;
1969 bool part_bundle = false;
1970 int status = 0;
1971 struct list_head tmp_rxq;
1972 struct htc_packet *packet, *tmp_pkt;
1973
1974 /* now go fetch the list of HTC packets */
1975 while (!list_empty(rx_pktq)) {
1976 fetched_pkts = 0;
1977
1978 INIT_LIST_HEAD(&tmp_rxq);
1979
1980 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1981 /*
1982 * There are enough packets to attempt a
1983 * bundle transfer and recv bundling is
1984 * allowed.
1985 */
1986 status = ath6kl_htc_rx_bundle(target, rx_pktq,
1987 &tmp_rxq,
1988 &fetched_pkts,
1989 part_bundle);
1990 if (status)
1991 goto fail_rx;
1992
1993 if (!list_empty(rx_pktq))
1994 part_bundle = true;
1995
1996 list_splice_tail_init(&tmp_rxq, comp_pktq);
1997 }
1998
1999 if (!fetched_pkts) {
2000
2001 packet = list_first_entry(rx_pktq, struct htc_packet,
2002 list);
2003
2004 /* fully synchronous */
2005 packet->completion = NULL;
2006
2007 if (!list_is_singular(rx_pktq))
2008 /*
2009 * look_aheads in all packets
2010 * except the last one in the
2011 * bundle must be ignored
2012 */
2013 packet->info.rx.rx_flags |=
2014 HTC_RX_PKT_IGNORE_LOOKAHEAD;
2015
2016 /* go fetch the packet */
2017 status = ath6kl_htc_rx_packet(target, packet,
2018 packet->act_len);
2019
2020 list_move_tail(&packet->list, &tmp_rxq);
2021
2022 if (status)
2023 goto fail_rx;
2024
2025 list_splice_tail_init(&tmp_rxq, comp_pktq);
2026 }
2027 }
2028
2029 return 0;
2030
2031 fail_rx:
2032
2033 /*
2034 * Clean up any packets we allocated but didn't use to
2035 * actually fetch any packets.
2036 */
2037
2038 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
2039 list_del(&packet->list);
2040 htc_reclaim_rxbuf(target, packet,
2041 &target->endpoint[packet->endpoint]);
2042 }
2043
2044 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
2045 list_del(&packet->list);
2046 htc_reclaim_rxbuf(target, packet,
2047 &target->endpoint[packet->endpoint]);
2048 }
2049
2050 return status;
2051 }
2052
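/*
 * Main receive handler, driven by the lookahead reported by the IRQ
 * processing path. Receive buffers are allocated per lookahead, then
 * fetched and processed in a loop until no further lookahead is
 * returned or an error occurs; *num_pkts reports how many packets
 * were fetched.
 */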
2053 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2054 u32 msg_look_ahead, int *num_pkts)
2055 {
2056 struct htc_packet *packets, *tmp_pkt;
2057 struct htc_endpoint *endpoint;
2058 struct list_head rx_pktq, comp_pktq;
2059 int status = 0;
2060 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2061 int num_look_ahead = 1;
2062 enum htc_endpoint_id id;
2063 int n_fetched = 0;
2064
2065 *num_pkts = 0;
2066
2067 /*
2068 * On first entry copy the look_aheads into our temp array for
2069 * processing
2070 */
2071 look_aheads[0] = msg_look_ahead;
2072
2073 while (true) {
2074
2075 /*
2076 * First lookahead sets the expected endpoint IDs for all
2077 * packets in a bundle.
2078 */
2079 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2080 if (id >= ENDPOINT_MAX) {
2081 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2082 id);
2083 status = -ENOMEM;
2084 break;
2085 }
2086 
2087 endpoint = &target->endpoint[id];
2088
2089 INIT_LIST_HEAD(&rx_pktq);
2090 INIT_LIST_HEAD(&comp_pktq);
2091
2092 /*
2093 * Try to allocate as many HTC RX packets as indicated by
2094 * the look_aheads.
2095 */
2096 status = ath6kl_htc_rx_alloc(target, look_aheads,
2097 num_look_ahead, endpoint,
2098 &rx_pktq);
2099 if (status)
2100 break;
2101
2102 if (get_queue_depth(&rx_pktq) >= 2)
2103 /*
2104 * A recv bundle was detected, force an IRQ status
2105 * re-check
2106 */
2107 target->chk_irq_status_cnt = 1;
2108
2109 n_fetched += get_queue_depth(&rx_pktq);
2110
2111 num_look_ahead = 0;
2112
2113 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2114
2115 if (!status)
2116 ath6kl_htc_rx_chk_water_mark(endpoint);
2117
2118 /* Process fetched packets */
2119 status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2120 look_aheads,
2121 &num_look_ahead);
2122
2123 if (!num_look_ahead || status)
2124 break;
2125
2126 /*
2127 * For SYNCH processing, if we get here, we are running
2128 * through the loop again due to a detected lookahead. Set
2129 * flag that we should re-check IRQ status registers again
2130 * before leaving IRQ processing, this can net better
2131 * performance in high throughput situations.
2132 */
2133 target->chk_irq_status_cnt = 1;
2134 }
2135
2136 if (status) {
2137 ath6kl_err("failed to get pending recv messages: %d\n",
2138 status);
2139
2140 /* cleanup any packets in sync completion queue */
2141 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2142 list_del(&packets->list);
2143 htc_reclaim_rxbuf(target, packets,
2144 &target->endpoint[packets->endpoint]);
2145 }
2146
2147 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2148 ath6kl_warn("host is going to stop, blocking receiver for htc_stop\n");
2149 ath6kl_hif_rx_control(target->dev, false);
2150 }
2151 }
2152
2153 /*
2154 * Before leaving, check to see if host ran out of buffers and
2155 * needs to stop the receiver.
2156 */
2157 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2158 ath6kl_warn("host has no rx buffers, blocking receiver to prevent overrun\n");
2159 ath6kl_hif_rx_control(target->dev, false);
2160 }
2161 *num_pkts = n_fetched;
2162
2163 return status;
2164 }
2165
2166 /*
2167 * Synchronously wait for a control message from the target.
2168 * This function is used at initialization time ONLY. At init,
2169 * messages on ENDPOINT 0 are expected.
2170 */
2171 static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2172 {
2173 struct htc_packet *packet = NULL;
2174 struct htc_frame_hdr *htc_hdr;
2175 u32 look_ahead;
2176
2177 if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
2178 HTC_TARGET_RESPONSE_TIMEOUT))
2179 return NULL;
2180
2181 ath6kl_dbg(ATH6KL_DBG_HTC,
2182 "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
2183
2184 htc_hdr = (struct htc_frame_hdr *)&look_ahead;
2185
2186 if (htc_hdr->eid != ENDPOINT_0)
2187 return NULL;
2188
2189 packet = htc_get_control_buf(target, false);
2190
2191 if (!packet)
2192 return NULL;
2193
2194 packet->info.rx.rx_flags = 0;
2195 packet->info.rx.exp_hdr = look_ahead;
2196 packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
2197
2198 if (packet->act_len > packet->buf_len)
2199 goto fail_ctrl_rx;
2200
2201 /* we want synchronous operation */
2202 packet->completion = NULL;
2203
2204 /* get the message from the device, this will block */
2205 if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2206 goto fail_ctrl_rx;
2207
2208 /* process receive header */
2209 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2210
2211 if (packet->status) {
2212 ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
2213 packet->status);
2214 goto fail_ctrl_rx;
2215 }
2216
2217 return packet;
2218
2219 fail_ctrl_rx:
2220 if (packet != NULL) {
2221 htc_rxpkt_reset(packet);
2222 reclaim_rx_ctrl_buf(target, packet);
2223 }
2224
2225 return NULL;
2226 }
2227
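/*
 * Add caller-supplied receive buffers. If HTC is stopping, the buffers
 * are completed immediately with -ECANCELED; otherwise they are queued
 * on the endpoint's rx_bufq and, if the receiver was blocked waiting
 * for buffers on this endpoint, reception is re-enabled.
 */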
2228 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
2229 struct list_head *pkt_queue)
2230 {
2231 struct htc_endpoint *endpoint;
2232 struct htc_packet *first_pkt;
2233 bool rx_unblock = false;
2234 int status = 0, depth;
2235
2236 if (list_empty(pkt_queue))
2237 return -ENOMEM;
2238
2239 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
2240
2241 if (first_pkt->endpoint >= ENDPOINT_MAX)
2242 return status;
2243
2244 depth = get_queue_depth(pkt_queue);
2245
2246 ath6kl_dbg(ATH6KL_DBG_HTC,
2247 "htc rx add multiple ep id %d cnt %d len %d\n",
2248 first_pkt->endpoint, depth, first_pkt->buf_len);
2249
2250 endpoint = &target->endpoint[first_pkt->endpoint];
2251
2252 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2253 struct htc_packet *packet, *tmp_pkt;
2254
2255 /* walk through queue and mark each one canceled */
2256 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
2257 packet->status = -ECANCELED;
2258 list_del(&packet->list);
2259 ath6kl_htc_rx_complete(endpoint, packet);
2260 }
2261
2262 return status;
2263 }
2264
2265 spin_lock_bh(&target->rx_lock);
2266
2267 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
2268
2269 /* check if we are blocked waiting for a new buffer */
2270 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2271 if (target->ep_waiting == first_pkt->endpoint) {
2272 ath6kl_dbg(ATH6KL_DBG_HTC,
2273 "htc rx blocked on ep %d, unblocking\n",
2274 target->ep_waiting);
2275 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
2276 target->ep_waiting = ENDPOINT_MAX;
2277 rx_unblock = true;
2278 }
2279 }
2280
2281 spin_unlock_bh(&target->rx_lock);
2282
2283 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
2284 /* TODO : implement a buffer threshold count? */
2285 ath6kl_hif_rx_control(target->dev, true);
2286
2287 return status;
2288 }
2289
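/*
 * Free every receive buffer still queued on the in-use endpoints. The
 * rx_lock is dropped around each dev_kfree_skb() call.
 */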
2290 void ath6kl_htc_flush_rx_buf(struct htc_target *target)
2291 {
2292 struct htc_endpoint *endpoint;
2293 struct htc_packet *packet, *tmp_pkt;
2294 int i;
2295
2296 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2297 endpoint = &target->endpoint[i];
2298 if (!endpoint->svc_id)
2299 /* not in use.. */
2300 continue;
2301
2302 spin_lock_bh(&target->rx_lock);
2303 list_for_each_entry_safe(packet, tmp_pkt,
2304 &endpoint->rx_bufq, list) {
2305 list_del(&packet->list);
2306 spin_unlock_bh(&target->rx_lock);
2307 ath6kl_dbg(ATH6KL_DBG_HTC,
2308 "htc rx flush pkt 0x%p len %d ep %d\n",
2309 packet, packet->buf_len,
2310 packet->endpoint);
2311 dev_kfree_skb(packet->pkt_cntxt);
2312 spin_lock_bh(&target->rx_lock);
2313 }
2314 spin_unlock_bh(&target->rx_lock);
2315 }
2316 }
2317
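/*
 * Connect a service to the target. Except for the pseudo control
 * service (always ENDPOINT_0), this sends an HTC connect service
 * message on the control endpoint, waits synchronously for the
 * response and then sets up the assigned endpoint, including its
 * credit distribution parameters.
 */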
2318 int ath6kl_htc_conn_service(struct htc_target *target,
2319 struct htc_service_connect_req *conn_req,
2320 struct htc_service_connect_resp *conn_resp)
2321 {
2322 struct htc_packet *rx_pkt = NULL;
2323 struct htc_packet *tx_pkt = NULL;
2324 struct htc_conn_service_resp *resp_msg;
2325 struct htc_conn_service_msg *conn_msg;
2326 struct htc_endpoint *endpoint;
2327 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2328 unsigned int max_msg_sz = 0;
2329 int status = 0;
2330
2331 ath6kl_dbg(ATH6KL_DBG_HTC,
2332 "htc connect service target 0x%p service id 0x%x\n",
2333 target, conn_req->svc_id);
2334
2335 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2336 /* special case for pseudo control service */
2337 assigned_ep = ENDPOINT_0;
2338 max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2339 } else {
2340 /* allocate a packet to send to the target */
2341 tx_pkt = htc_get_control_buf(target, true);
2342
2343 if (!tx_pkt)
2344 return -ENOMEM;
2345
2346 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2347 memset(conn_msg, 0, sizeof(*conn_msg));
2348 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2349 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2350 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2351
2352 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2353 sizeof(*conn_msg) + conn_msg->svc_meta_len,
2354 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2355
2356 /* we want synchronous operation */
2357 tx_pkt->completion = NULL;
2358 ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
2359 status = ath6kl_htc_tx_issue(target, tx_pkt);
2360
2361 if (status)
2362 goto fail_tx;
2363
2364 /* wait for response */
2365 rx_pkt = htc_wait_for_ctrl_msg(target);
2366
2367 if (!rx_pkt) {
2368 status = -ENOMEM;
2369 goto fail_tx;
2370 }
2371
2372 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2373
2374 if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
2375 || (rx_pkt->act_len < sizeof(*resp_msg))) {
2376 status = -ENOMEM;
2377 goto fail_tx;
2378 }
2379
2380 conn_resp->resp_code = resp_msg->status;
2381 /* check response status */
2382 if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2383 ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2384 resp_msg->svc_id, resp_msg->status);
2385 status = -ENOMEM;
2386 goto fail_tx;
2387 }
2388
2389 assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2390 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2391 }
2392
2393 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
2394 status = -ENOMEM;
2395 goto fail_tx;
2396 }
2397
2398 endpoint = &target->endpoint[assigned_ep];
2399 endpoint->eid = assigned_ep;
2400 if (endpoint->svc_id) {
2401 status = -ENOMEM;
2402 goto fail_tx;
2403 }
2404
2405 /* return assigned endpoint to caller */
2406 conn_resp->endpoint = assigned_ep;
2407 conn_resp->len_max = max_msg_sz;
2408
2409 /* setup the endpoint */
2410
2411 /* this marks the endpoint in use */
2412 endpoint->svc_id = conn_req->svc_id;
2413
2414 endpoint->max_txq_depth = conn_req->max_txq_depth;
2415 endpoint->len_max = max_msg_sz;
2416 endpoint->ep_cb = conn_req->ep_cb;
2417 endpoint->cred_dist.svc_id = conn_req->svc_id;
2418 endpoint->cred_dist.htc_ep = endpoint;
2419 endpoint->cred_dist.endpoint = assigned_ep;
2420 endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2421
2422 if (conn_req->max_rxmsg_sz) {
2423 /*
2424 * Override the cred_per_msg calculation; this optimizes
2425 * the credit-low indications since the host will actually
2426 * issue smaller messages in the Send path.
2427 */
2428 if (conn_req->max_rxmsg_sz > max_msg_sz) {
2429 status = -ENOMEM;
2430 goto fail_tx;
2431 }
2432 endpoint->cred_dist.cred_per_msg =
2433 conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2434 } else
2435 endpoint->cred_dist.cred_per_msg =
2436 max_msg_sz / target->tgt_cred_sz;
2437
2438 if (!endpoint->cred_dist.cred_per_msg)
2439 endpoint->cred_dist.cred_per_msg = 1;
2440
2441 /* save local connection flags */
2442 endpoint->conn_flags = conn_req->flags;
2443
2444 fail_tx:
2445 if (tx_pkt)
2446 htc_reclaim_txctrl_buf(target, tx_pkt);
2447
2448 if (rx_pkt) {
2449 htc_rxpkt_reset(rx_pkt);
2450 reclaim_rx_ctrl_buf(target, rx_pkt);
2451 }
2452
2453 return status;
2454 }
2455
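/* Reset per-endpoint state and the credit distribution list. */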
2456 static void reset_ep_state(struct htc_target *target)
2457 {
2458 struct htc_endpoint *endpoint;
2459 int i;
2460
2461 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2462 endpoint = &target->endpoint[i];
2463 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2464 endpoint->svc_id = 0;
2465 endpoint->len_max = 0;
2466 endpoint->max_txq_depth = 0;
2467 memset(&endpoint->ep_st, 0,
2468 sizeof(endpoint->ep_st));
2469 INIT_LIST_HEAD(&endpoint->rx_bufq);
2470 INIT_LIST_HEAD(&endpoint->txq);
2471 endpoint->target = target;
2472 }
2473
2474 /* reset distribution list */
2475 /* FIXME: free existing entries */
2476 INIT_LIST_HEAD(&target->cred_dist_list);
2477 }
2478
2479 int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
2480 enum htc_endpoint_id endpoint)
2481 {
2482 int num;
2483
2484 spin_lock_bh(&target->rx_lock);
2485 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2486 spin_unlock_bh(&target->rx_lock);
2487 return num;
2488 }
2489
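/*
 * Set up message bundling: the bundle size is bounded by what HTC, the
 * HIF scatter support and the extended mbox address range can handle.
 * Send bundling is disabled if the credit size is not block aligned.
 */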
2490 static void htc_setup_msg_bndl(struct htc_target *target)
2491 {
2492 /* limit what HTC can handle */
2493 target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2494 target->msg_per_bndl_max);
2495
2496 if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2497 target->msg_per_bndl_max = 0;
2498 return;
2499 }
2500
2501 /* limit bundling to what the device layer can handle */
2502 target->msg_per_bndl_max = min(target->max_scat_entries,
2503 target->msg_per_bndl_max);
2504
2505 ath6kl_dbg(ATH6KL_DBG_BOOT,
2506 "htc bundling allowed msg_per_bndl_max %d\n",
2507 target->msg_per_bndl_max);
2508
2509 /* Max rx bundle size is limited by the max tx bundle size */
2510 target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2511 /* Max tx bundle size is limited by the extended mbox address range */
2512 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2513 target->max_xfer_szper_scatreq);
2514
2515 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
2516 target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2517
2518 if (target->max_tx_bndl_sz)
2519 target->tx_bndl_enable = true;
2520
2521 if (target->max_rx_bndl_sz)
2522 target->rx_bndl_enable = true;
2523
2524 if ((target->tgt_cred_sz % target->block_sz) != 0) {
2525 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2526 target->tgt_cred_sz);
2527
2528 /*
2529 * Disallow send bundling since the credit size is
2530 * not aligned to a block size; the I/O block
2531 * padding would spill into the next credit buffer,
2532 * which is fatal.
2533 */
2534 target->tx_bndl_enable = false;
2535 }
2536 }
2537
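/*
 * Wait for the target ready message on the control endpoint, record
 * the advertised credit count/size and HTC protocol version, set up
 * bundling when supported and connect the pseudo control service.
 */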
2538 int ath6kl_htc_wait_target(struct htc_target *target)
2539 {
2540 struct htc_packet *packet = NULL;
2541 struct htc_ready_ext_msg *rdy_msg;
2542 struct htc_service_connect_req connect;
2543 struct htc_service_connect_resp resp;
2544 int status;
2545
2546 /* FIXME: remove once USB support is implemented */
2547 if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
2548 ath6kl_err("HTC doesn't support USB yet. Patience!\n");
2549 return -EOPNOTSUPP;
2550 }
2551
2552 /* we should be getting 1 control message that the target is ready */
2553 packet = htc_wait_for_ctrl_msg(target);
2554
2555 if (!packet)
2556 return -ENOMEM;
2557
2558 /* we controlled the buffer creation so it's properly aligned */
2559 rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2560
2561 if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2562 (packet->act_len < sizeof(struct htc_ready_msg))) {
2563 status = -ENOMEM;
2564 goto fail_wait_target;
2565 }
2566
2567 if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2568 status = -ENOMEM;
2569 goto fail_wait_target;
2570 }
2571
2572 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2573 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2574
2575 ath6kl_dbg(ATH6KL_DBG_BOOT,
2576 "htc target ready credits %d size %d\n",
2577 target->tgt_creds, target->tgt_cred_sz);
2578
2579 /* check if this is an extended ready message */
2580 if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2581 /* this is an extended message */
2582 target->htc_tgt_ver = rdy_msg->htc_ver;
2583 target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2584 } else {
2585 /* legacy */
2586 target->htc_tgt_ver = HTC_VERSION_2P0;
2587 target->msg_per_bndl_max = 0;
2588 }
2589
2590 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
2591 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2592 target->htc_tgt_ver);
2593
2594 if (target->msg_per_bndl_max > 0)
2595 htc_setup_msg_bndl(target);
2596
2597 /* setup our pseudo HTC control endpoint connection */
2598 memset(&connect, 0, sizeof(connect));
2599 memset(&resp, 0, sizeof(resp));
2600 connect.ep_cb.rx = htc_ctrl_rx;
2601 connect.ep_cb.rx_refill = NULL;
2602 connect.ep_cb.tx_full = NULL;
2603 connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2604 connect.svc_id = HTC_CTRL_RSVD_SVC;
2605
2606 /* connect fake service */
2607 status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
2608
2609 if (status)
2610 /*
2611 * FIXME: this call doesn't make sense, the caller should
2612 * call ath6kl_htc_cleanup() when it wants to remove htc
2613 */
2614 ath6kl_hif_cleanup_scatter(target->dev->ar);
2615
2616 fail_wait_target:
2617 if (packet) {
2618 htc_rxpkt_reset(packet);
2619 reclaim_rx_ctrl_buf(target, packet);
2620 }
2621
2622 return status;
2623 }
2624
2625 /*
2626 * Start HTC, enable interrupts and let the target know
2627 * host has finished setup.
2628 */
2629 int ath6kl_htc_start(struct htc_target *target)
2630 {
2631 struct htc_packet *packet;
2632 int status;
2633
2634 memset(&target->dev->irq_proc_reg, 0,
2635 sizeof(target->dev->irq_proc_reg));
2636
2637 /* Disable interrupts at the chip level */
2638 ath6kl_hif_disable_intrs(target->dev);
2639
2640 target->htc_flags = 0;
2641 target->rx_st_flags = 0;
2642
2643 /* Push control receive buffers into htc control endpoint */
2644 while ((packet = htc_get_control_buf(target, false)) != NULL) {
2645 status = htc_add_rxbuf(target, packet);
2646 if (status)
2647 return status;
2648 }
2649
2650 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2651 ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
2652 target->tgt_creds);
2653
2654 dump_cred_dist_stats(target);
2655
2656 /* Indicate setup completion to the target */
2657 status = htc_setup_tx_complete(target);
2658
2659 if (status)
2660 return status;
2661
2662 /* unmask interrupts */
2663 status = ath6kl_hif_unmask_intrs(target->dev);
2664
2665 if (status)
2666 ath6kl_htc_stop(target);
2667
2668 return status;
2669 }
2670
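/*
 * Reset endpoint state and allocate the control buffers used for
 * endpoint 0 messages; each buffer holds the larger of one block or
 * the maximum control message, plus the HTC header.
 */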
2671 static int ath6kl_htc_reset(struct htc_target *target)
2672 {
2673 u32 block_size, ctrl_bufsz;
2674 struct htc_packet *packet;
2675 int i;
2676
2677 reset_ep_state(target);
2678
2679 block_size = target->dev->ar->mbox_info.block_size;
2680
2681 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2682 (block_size + HTC_HDR_LENGTH) :
2683 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2684
2685 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2686 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2687 if (!packet)
2688 return -ENOMEM;
2689
2690 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2691 if (!packet->buf_start) {
2692 kfree(packet);
2693 return -ENOMEM;
2694 }
2695
2696 packet->buf_len = ctrl_bufsz;
2697 if (i < NUM_CONTROL_RX_BUFFERS) {
2698 packet->act_len = 0;
2699 packet->buf = packet->buf_start;
2700 packet->endpoint = ENDPOINT_0;
2701 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2702 } else
2703 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2704 }
2705
2706 return 0;
2707 }
2708
2709 /* htc_stop: stop interrupt reception, and flush all queued buffers */
2710 void ath6kl_htc_stop(struct htc_target *target)
2711 {
2712 spin_lock_bh(&target->htc_lock);
2713 target->htc_flags |= HTC_OP_STATE_STOPPING;
2714 spin_unlock_bh(&target->htc_lock);
2715
2716 /*
2717 * Masking interrupts is a synchronous operation, when this
2718 * function returns all pending HIF I/O has completed, we can
2719 * safely flush the queues.
2720 */
2721 ath6kl_hif_mask_intrs(target->dev);
2722
2723 ath6kl_htc_flush_txep_all(target);
2724
2725 ath6kl_htc_flush_rx_buf(target);
2726
2727 ath6kl_htc_reset(target);
2728 }
2729
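/*
 * Allocate and initialize an HTC target instance: locks, free control
 * buffer lists, the underlying HIF layer, and finally the control
 * buffers via ath6kl_htc_reset().
 */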
2730 void *ath6kl_htc_create(struct ath6kl *ar)
2731 {
2732 struct htc_target *target = NULL;
2733 int status = 0;
2734
2735 target = kzalloc(sizeof(*target), GFP_KERNEL);
2736 if (!target) {
2737 ath6kl_err("unable to allocate memory\n");
2738 return NULL;
2739 }
2740
2741 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2742 if (!target->dev) {
2743 ath6kl_err("unable to allocate memory\n");
2744 status = -ENOMEM;
2745 goto err_htc_cleanup;
2746 }
2747
2748 spin_lock_init(&target->htc_lock);
2749 spin_lock_init(&target->rx_lock);
2750 spin_lock_init(&target->tx_lock);
2751
2752 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2753 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2754 INIT_LIST_HEAD(&target->cred_dist_list);
2755
2756 target->dev->ar = ar;
2757 target->dev->htc_cnxt = target;
2758 target->ep_waiting = ENDPOINT_MAX;
2759
2760 status = ath6kl_hif_setup(target->dev);
2761 if (status)
2762 goto err_htc_cleanup;
2763
2764 status = ath6kl_htc_reset(target);
2765 if (status)
2766 goto err_htc_cleanup;
2767
2768 return target;
2769
2770 err_htc_cleanup:
2771 ath6kl_htc_cleanup(target);
2772
2773 return NULL;
2774 }
2775
2776 /* cleanup the HTC instance */
2777 void ath6kl_htc_cleanup(struct htc_target *target)
2778 {
2779 struct htc_packet *packet, *tmp_packet;
2780
2781 /* FIXME: remove check once USB support is implemented */
2782 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
2783 ath6kl_hif_cleanup_scatter(target->dev->ar);
2784
2785 list_for_each_entry_safe(packet, tmp_packet,
2786 &target->free_ctrl_txbuf, list) {
2787 list_del(&packet->list);
2788 kfree(packet->buf_start);
2789 kfree(packet);
2790 }
2791
2792 list_for_each_entry_safe(packet, tmp_packet,
2793 &target->free_ctrl_rxbuf, list) {
2794 list_del(&packet->list);
2795 kfree(packet->buf_start);
2796 kfree(packet);
2797 }
2798
2799 kfree(target->dev);
2800 kfree(target);
2801 }