/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"

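/*
 * Map an outgoing IBSS frame to an HTC endpoint. Multicast frames always
 * use ENDPOINT_2. Unicast peers are tracked in ar->node_map: a known peer
 * reuses its endpoint, a new peer takes the first data endpoint
 * (ENDPOINT_2..ENDPOINT_5) with no pending tx, and when all endpoints are
 * busy the entries are redistributed round-robin. *map_no returns the
 * 1-based node map index, or 0 for multicast.
 */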
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

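/*
 * AP mode power-save filter for an outgoing frame. Multicast frames are
 * queued on ar->mcastpsq while any associated STA sleeps (unless this
 * transmit is driven by DTIM expiry, where only MoreData is computed);
 * unicast frames to a sleeping STA are queued on that STA's psq unless a
 * PS-Poll is being serviced. Returns true if the skb was consumed here
 * (queued or dropped) and must not be transmitted now.
 */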
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first mcast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(ar, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}

/* Tx functions */

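/*
 * Send an internally generated (WMI control) packet on the given HTC
 * endpoint. A cookie tracks the packet through HTC; the frame is dropped
 * with -ENOMEM when the control endpoint is already flagged full or no
 * cookie can be allocated.
 */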
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full; don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous; if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

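/*
 * ndo_start_xmit handler for the ath6kl netdevice: applies AP power-save
 * queueing, converts the frame to 802.3 with a WMI data header, resolves
 * the AC-to-endpoint (or IBSS per-node) mapping and hands the frame to
 * HTC. Returns 0 in all cases; dropped frames only bump the error stats.
 */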
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &ar->flag)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (ar->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(ar, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if (skb_headroom(skb) < dev->needed_headroom) {
			WARN_ON(1);
			goto fail_tx;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((ar->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since we
		 * are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous; if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	ar->net_stats.tx_dropped++;
	ar->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
				ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive.
			 * Reset and search for the next highest active
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}

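/*
 * HTC "send queue full" callback. Control endpoint overflow is flagged
 * and kept; data overflow either drops low-priority packets (when a
 * higher-priority stream is active and the cookie pool runs low) or
 * stops the network queue.
 */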
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	enum htc_endpoint_id endpoint = packet->endpoint;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		return HTC_SEND_FULL_KEEP;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return HTC_SEND_FULL_KEEP;

	if (ar->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue; however, we
		 * should stop the network queues.
		 */
		goto stop_net_queues;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies is reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		return HTC_SEND_FULL_DROP;

stop_net_queues:
	spin_lock_bh(&ar->lock);
	set_bit(NETQ_STOPPED, &ar->flag);
	spin_unlock_bh(&ar->lock);
	netif_stop_queue(ar->net_dev);

	return HTC_SEND_FULL_KEEP;
}

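/*
 * Drop the tx-pending reference a completed IBSS frame holds on its
 * node map entry and trim fully idle entries from the tail of the map.
 */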
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
				     enum htc_endpoint_id eid, u32 map_no)
{
	u32 i;

	if (ar->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

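/*
 * HTC tx completion callback: reaps a queue of completed packets under
 * ar->lock, updates pending counts and net_stats, recycles cookies and
 * skbs, restarts the network queue and, once the control endpoint
 * drains, wakes anyone waiting on ar->event_wq.
 */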
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing = false;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		packet->buf = skb->data;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing = true;

			ar->net_stats.tx_errors++;

			if (status != -ENOSPC)
				ath6kl_err("tx error, status: 0x%x\n", status);
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing = false;
			ar->net_stats.tx_packets++;
			ar->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(ar, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &ar->flag))
			clear_bit(NETQ_STOPPED, &ar->flag);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	if (test_bit(CONNECTED, &ar->flag)) {
		if (!flushing)
			netif_wake_queue(ar->net_dev);
	}

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

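/* Hand a fully reassembled 802.3 frame to the network stack. */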
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}

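/*
 * Top up the HTC receive queue for an endpoint to ATH6KL_MAX_RX_BUFFERS.
 * The htc_packet bookkeeping lives in skb->head and the data pointer is
 * 4-byte aligned before the buffer is handed to HTC.
 */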
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
		       ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

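/*
 * Split a received A-MSDU into its 802.3 subframes, convert each to DIX
 * and queue them on the TID's rx queue; the original aggregate skb is
 * freed afterwards.
 */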
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of the A-MSDU subframe padding bytes -
		 * round up to the nearest 4-byte boundary.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

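/*
 * Release in-order frames from a TID's reorder hold queue to the stack.
 * With order == 1 the flush stops at the first hole; a non-zero seq_no
 * (e.g. from a BAR) bounds the flush instead of the current window end.
 */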
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the same
	 * index position as the index that is just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there will be holes when sequence wrap-around occurs. The target
	 * should judiciously choose win_sz with this condition in mind;
	 * for 4095, TID_WINDOW_SZ = 2 x win_sz with a win_sz of 2, 4, 8
	 * or 16 works fine.
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}

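/*
 * Core rx reorder entry point for one frame of a given TID. Out-of-window
 * sequence numbers first shift the window (flushing frames that fall
 * outside it), then the frame is parked in the hold queue, in-order
 * frames are delivered, and the flush timer is armed if holes remain.
 * Returns true when the aggregation code has consumed the skb.
 */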
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check whether the incoming sequence number falls in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
				     (rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q, which is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 * 2a. is the frame_seq_no preceding the current tid_seq_no?
	 *     -> drop the frame, perhaps the sender did not get our ACK.
	 *        this is taken care of above.
	 * 2b. is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
	 *     -> taken care of above, by moving the window forward.
	 */
	if (node->skb) {
		/* only count a duplicate when a stale frame held the slot */
		dev_kfree_skb(node->skb);
		stats->num_dups++;
	}

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer, so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}

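/*
 * HTC rx completion callback: strips the HTC and WMI headers, routes
 * control traffic to WMI, handles AP power-save state transitions and
 * intra-BSS forwarding, converts the payload to DIX and passes it to the
 * reorder logic or directly up the stack.
 */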
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	u16 seq_no, offset;
	u8 tid;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	/* drop the frame on error or if it cannot hold the HTC header */
	if (status || packet->act_len < HTC_HDR_LENGTH) {
		ar->net_stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&ar->lock);

	ar->net_stats.rx_packets++;
	ar->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&ar->lock);

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	skb->dev = ar->net_dev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
		return;
	}

	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (ar->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		ar->net_stats.rx_errors++;
		ar->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (ar->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(ar, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, ar->net_dev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(ar->net_dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (ar->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac as
			 * the Mac address. If found, send the frame to
			 * it on the air; else send the frame up the
			 * stack.
			 */
			struct ath6kl_sta *conn = NULL;
			conn = ath6kl_find_sta(ar, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, ar->net_dev);

		if (skb == NULL)
			/* nothing left to deliver up the stack */
			return;
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
}

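/*
 * Reorder flush timer. Any TID that made no progress since the last run
 * has its hold queue force-flushed; the timer is re-armed only while
 * frames are still parked in some hold queue.
 */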
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

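/*
 * ADDBA request event from the target: (re)initialize the TID's reorder
 * state with a hold queue of TID_WINDOW_SZ(win_sz) slots starting at
 * seq_no.
 */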
void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

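/*
 * Allocate the per-device aggregation context: the free-buffer pool, the
 * flush timer and the per-TID reorder state (hold queues are allocated
 * lazily on ADDBA).
 */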
struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}

void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}