ath6kl: Maintain firmware interface index in struct ath6kl_vif
[deliverable/linux.git] / drivers/net/wireless/ath/ath6kl/txrx.c
1 /*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "core.h"
18 #include "debug.h"
19
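/*
 * Map an IBSS destination MAC address to an HTC endpoint so that
 * per-node power-save accounting can be done. Multicast frames always
 * use ENDPOINT_2; unicast destinations get (or reuse) a node_map slot,
 * preferring an endpoint with no pending tx and otherwise redistributing
 * across ENDPOINT_2..ENDPOINT_5 in round-robin order.
 */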
20 static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
21 u32 *map_no)
22 {
23 struct ath6kl *ar = ath6kl_priv(dev);
24 struct ethhdr *eth_hdr;
25 u32 i, ep_map = -1;
26 u8 *datap;
27
28 *map_no = 0;
29 datap = skb->data;
30 eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
31
32 if (is_multicast_ether_addr(eth_hdr->h_dest))
33 return ENDPOINT_2;
34
35 for (i = 0; i < ar->node_num; i++) {
36 if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
37 ETH_ALEN) == 0) {
38 *map_no = i + 1;
39 ar->node_map[i].tx_pend++;
40 return ar->node_map[i].ep_id;
41 }
42
43 if ((ep_map == -1) && !ar->node_map[i].tx_pend)
44 ep_map = i;
45 }
46
47 if (ep_map == -1) {
48 ep_map = ar->node_num;
49 ar->node_num++;
50 if (ar->node_num > MAX_NODE_NUM)
51 return ENDPOINT_UNUSED;
52 }
53
54 memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
55
56 for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
57 if (!ar->tx_pending[i]) {
58 ar->node_map[ep_map].ep_id = i;
59 break;
60 }
61
62 /*
63 * No free endpoint is available; start redistribution on
64 * the in-use endpoints.
65 */
66 if (i == ENDPOINT_5) {
67 ar->node_map[ep_map].ep_id = ar->next_ep_id;
68 ar->next_ep_id++;
69 if (ar->next_ep_id > ENDPOINT_5)
70 ar->next_ep_id = ENDPOINT_2;
71 }
72 }
73
74 *map_no = ep_map + 1;
75 ar->node_map[ep_map].tx_pend++;
76
77 return ar->node_map[ep_map].ep_id;
78 }
79
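/*
 * AP-mode power-save handling for an outgoing frame. Returns true when
 * the skb has been consumed (queued on the multicast PS queue, queued on
 * a sleeping station's psq, or dropped because no matching station was
 * found); returns false when the caller should transmit it normally,
 * with *more_data indicating whether the MoreData bit must be set.
 */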
80 static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
81 bool *more_data)
82 {
83 struct ethhdr *datap = (struct ethhdr *) skb->data;
84 struct ath6kl_sta *conn = NULL;
85 bool ps_queued = false, is_psq_empty = false;
86 /* TODO: Find out vif */
87 struct ath6kl_vif *vif = ar->vif;
88
89 if (is_multicast_ether_addr(datap->h_dest)) {
90 u8 ctr = 0;
91 bool q_mcast = false;
92
93 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
94 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
95 q_mcast = true;
96 break;
97 }
98 }
99
100 if (q_mcast) {
101 /*
102 * If this transmit is not because of a DTIM expiry,
103 * queue it.
104 */
105 if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
106 bool is_mcastq_empty = false;
107
108 spin_lock_bh(&ar->mcastpsq_lock);
109 is_mcastq_empty =
110 skb_queue_empty(&ar->mcastpsq);
111 skb_queue_tail(&ar->mcastpsq, skb);
112 spin_unlock_bh(&ar->mcastpsq_lock);
113
114 /*
115 * If this is the first Mcast pkt getting
116 * queued, indicate to the target to set the
117 * BitmapControl LSB of the TIM IE.
118 */
119 if (is_mcastq_empty)
120 ath6kl_wmi_set_pvb_cmd(ar->wmi,
121 vif->fw_vif_idx,
122 MCAST_AID, 1);
123
124 ps_queued = true;
125 } else {
126 /*
127 * This transmit is because of Dtim expiry.
128 * Determine if MoreData bit has to be set.
129 */
130 spin_lock_bh(&ar->mcastpsq_lock);
131 if (!skb_queue_empty(&ar->mcastpsq))
132 *more_data = true;
133 spin_unlock_bh(&ar->mcastpsq_lock);
134 }
135 }
136 } else {
137 conn = ath6kl_find_sta(ar, datap->h_dest);
138 if (!conn) {
139 dev_kfree_skb(skb);
140
141 /* Inform the caller that the skb is consumed */
142 return true;
143 }
144
145 if (conn->sta_flags & STA_PS_SLEEP) {
146 if (!(conn->sta_flags & STA_PS_POLLED)) {
147 /* Queue the frames if the STA is sleeping */
148 spin_lock_bh(&conn->psq_lock);
149 is_psq_empty = skb_queue_empty(&conn->psq);
150 skb_queue_tail(&conn->psq, skb);
151 spin_unlock_bh(&conn->psq_lock);
152
153 /*
154 * If this is the first pkt getting queued
155 * for this STA, update the PVB for this
156 * STA.
157 */
158 if (is_psq_empty)
159 ath6kl_wmi_set_pvb_cmd(ar->wmi,
160 vif->fw_vif_idx,
161 conn->aid, 1);
162
163 ps_queued = true;
164 } else {
165 /*
166 * This tx is because of a PsPoll.
167 * Determine if MoreData bit has to be set.
168 */
169 spin_lock_bh(&conn->psq_lock);
170 if (!skb_queue_empty(&conn->psq))
171 *more_data = true;
172 spin_unlock_bh(&conn->psq_lock);
173 }
174 }
175 }
176
177 return ps_queued;
178 }
179
180 /* Tx functions */
181
182 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
183 enum htc_endpoint_id eid)
184 {
185 struct ath6kl *ar = devt;
186 int status = 0;
187 struct ath6kl_cookie *cookie = NULL;
188
189 spin_lock_bh(&ar->lock);
190
191 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
192 "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
193 skb, skb->len, eid);
194
195 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
196 /*
197 * Control endpoint is full, don't allocate resources, we
198 * are just going to drop this packet.
199 */
200 cookie = NULL;
201 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
202 skb, skb->len);
203 } else
204 cookie = ath6kl_alloc_cookie(ar);
205
206 if (cookie == NULL) {
207 spin_unlock_bh(&ar->lock);
208 status = -ENOMEM;
209 goto fail_ctrl_tx;
210 }
211
212 ar->tx_pending[eid]++;
213
214 if (eid != ar->ctrl_ep)
215 ar->total_tx_data_pend++;
216
217 spin_unlock_bh(&ar->lock);
218
219 cookie->skb = skb;
220 cookie->map_no = 0;
221 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
222 eid, ATH6KL_CONTROL_PKT_TAG);
223
224 /*
225 * This interface is asynchronous, if there is an error, cleanup
226 * will happen in the TX completion callback.
227 */
228 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
229
230 return 0;
231
232 fail_ctrl_tx:
233 dev_kfree_skb(skb);
234 return status;
235 }
236
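/*
 * Data transmit path for the net_device: applies AP power-save
 * queueing, converts the frame from DIX to 802.3, prepends the WMI data
 * header, maps the frame to an HTC endpoint (per-AC mapping, or the
 * IBSS node map when IBSS power save is enabled) and hands the packet
 * to HTC. The skb is always consumed.
 */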
237 int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
238 {
239 struct ath6kl *ar = ath6kl_priv(dev);
240 struct ath6kl_cookie *cookie = NULL;
241 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
242 struct ath6kl_vif *vif = netdev_priv(dev);
243 u32 map_no = 0;
244 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
245 u8 ac = 99; /* initialize to unmapped ac */
246 bool chk_adhoc_ps_mapping = false, more_data = false;
247 int ret;
248
249 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
250 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
251 skb, skb->data, skb->len);
252
253 /* If target is not associated */
254 if (!test_bit(CONNECTED, &vif->flags)) {
255 dev_kfree_skb(skb);
256 return 0;
257 }
258
259 if (!test_bit(WMI_READY, &ar->flag))
260 goto fail_tx;
261
262 /* AP mode Power saving processing */
263 if (vif->nw_type == AP_NETWORK) {
264 if (ath6kl_powersave_ap(ar, skb, &more_data))
265 return 0;
266 }
267
268 if (test_bit(WMI_ENABLED, &ar->flag)) {
269 if (skb_headroom(skb) < dev->needed_headroom) {
270 WARN_ON(1);
271 goto fail_tx;
272 }
273
274 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
275 ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
276 goto fail_tx;
277 }
278
279 if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
280 more_data, 0, 0, NULL)) {
281 ath6kl_err("wmi_data_hdr_add failed\n");
282 goto fail_tx;
283 }
284
285 if ((vif->nw_type == ADHOC_NETWORK) &&
286 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
287 chk_adhoc_ps_mapping = true;
288 else {
289 /* get the stream mapping */
290 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
291 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
292 if (ret)
293 goto fail_tx;
294 }
295 } else
296 goto fail_tx;
297
298 spin_lock_bh(&ar->lock);
299
300 if (chk_adhoc_ps_mapping)
301 eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
302 else
303 eid = ar->ac2ep_map[ac];
304
305 if (eid == 0 || eid == ENDPOINT_UNUSED) {
306 ath6kl_err("eid %d is not mapped!\n", eid);
307 spin_unlock_bh(&ar->lock);
308 goto fail_tx;
309 }
310
311 /* allocate resource for this packet */
312 cookie = ath6kl_alloc_cookie(ar);
313
314 if (!cookie) {
315 spin_unlock_bh(&ar->lock);
316 goto fail_tx;
317 }
318
319 /* update counts while the lock is held */
320 ar->tx_pending[eid]++;
321 ar->total_tx_data_pend++;
322
323 spin_unlock_bh(&ar->lock);
324
325 if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
326 skb_cloned(skb)) {
327 /*
328 * We will touch (move) the buffer data to align it. Since the
329 * skb buffer is cloned and not only the header is changed, we
330 * have to copy it to allow the changes. Since we are copying
331 * the data here, we may as well align it by reserving suitable
332 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
333 */
334 struct sk_buff *nskb;
335
336 nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
337 if (nskb == NULL)
338 goto fail_tx;
339 kfree_skb(skb);
340 skb = nskb;
341 }
342
343 cookie->skb = skb;
344 cookie->map_no = map_no;
345 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
346 eid, htc_tag);
347
348 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
349 skb->data, skb->len);
350
351 /*
352 * HTC interface is asynchronous, if this fails, cleanup will
353 * happen in the ath6kl_tx_complete callback.
354 */
355 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
356
357 return 0;
358
359 fail_tx:
360 dev_kfree_skb(skb);
361
362 vif->net_stats.tx_dropped++;
363 vif->net_stats.tx_aborted_errors++;
364
365 return 0;
366 }
367
368 /* indicate tx activity or inactivity on a WMI stream */
369 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
370 {
371 struct ath6kl *ar = devt;
372 enum htc_endpoint_id eid;
373 int i;
374
375 eid = ar->ac2ep_map[traffic_class];
376
377 if (!test_bit(WMI_ENABLED, &ar->flag))
378 goto notify_htc;
379
380 spin_lock_bh(&ar->lock);
381
382 ar->ac_stream_active[traffic_class] = active;
383
384 if (active) {
385 /*
386 * Keep track of the active stream with the highest
387 * priority.
388 */
389 if (ar->ac_stream_pri_map[traffic_class] >
390 ar->hiac_stream_active_pri)
391 /* set the new highest active priority */
392 ar->hiac_stream_active_pri =
393 ar->ac_stream_pri_map[traffic_class];
394
395 } else {
396 /*
397 * We may have to search for the next active stream
398 * that is the highest priority.
399 */
400 if (ar->hiac_stream_active_pri ==
401 ar->ac_stream_pri_map[traffic_class]) {
402 /*
403 * The highest priority stream just went inactive;
404 * reset and search for the next highest active
405 * priority stream.
406 */
407 ar->hiac_stream_active_pri = 0;
408
409 for (i = 0; i < WMM_NUM_AC; i++) {
410 if (ar->ac_stream_active[i] &&
411 (ar->ac_stream_pri_map[i] >
412 ar->hiac_stream_active_pri))
413 /*
414 * Set the new highest active
415 * priority.
416 */
417 ar->hiac_stream_active_pri =
418 ar->ac_stream_pri_map[i];
419 }
420 }
421 }
422
423 spin_unlock_bh(&ar->lock);
424
425 notify_htc:
426 /* notify HTC, this may cause credit distribution changes */
427 ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
428 }
429
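/*
 * Called by HTC when an endpoint's TX queue overflows. A full WMI
 * control endpoint is only flagged; for data, packets on lower-priority
 * streams may be dropped to preserve cookies for the highest-priority
 * active stream, otherwise the network queue is stopped and the packet
 * kept.
 */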
430 enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
431 struct htc_packet *packet)
432 {
433 struct ath6kl *ar = target->dev->ar;
434 /* TODO: Find out the vif properly */
435 struct ath6kl_vif *vif = ar->vif;
436 enum htc_endpoint_id endpoint = packet->endpoint;
437
438 if (endpoint == ar->ctrl_ep) {
439 /*
440 * Under normal WMI, if this is getting full then something
441 * is running rampant; the host should not be exhausting the
442 * WMI queue with too many commands. The only exception to
443 * this is during testing using endpointping.
444 */
445 spin_lock_bh(&ar->lock);
446 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
447 spin_unlock_bh(&ar->lock);
448 ath6kl_err("wmi ctrl ep is full\n");
449 return HTC_SEND_FULL_KEEP;
450 }
451
452 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
453 return HTC_SEND_FULL_KEEP;
454
455 if (vif->nw_type == ADHOC_NETWORK)
456 /*
457 * In adhoc mode, we cannot differentiate traffic
458 * priorities, so there is no need to continue; however, we
459 * should stop the network queues.
460 */
461 goto stop_net_queues;
462
463 /*
464 * The last MAX_HI_COOKIE_NUM "batch" of cookies is reserved for
465 * the highest active stream.
466 */
467 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
468 ar->hiac_stream_active_pri &&
469 ar->cookie_count <= MAX_HI_COOKIE_NUM)
470 /*
471 * Give preference to the highest priority stream by
472 * dropping the packets which overflowed.
473 */
474 return HTC_SEND_FULL_DROP;
475
476 stop_net_queues:
477 spin_lock_bh(&ar->lock);
478 set_bit(NETQ_STOPPED, &vif->flags);
479 spin_unlock_bh(&ar->lock);
480 netif_stop_queue(ar->net_dev);
481
482 return HTC_SEND_FULL_KEEP;
483 }
484
485 /* TODO this needs to be looked at */
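/*
 * Drop the per-node tx_pend reference taken in ath6kl_ibss_map_epid()
 * and, when the last entries in node_map become idle, shrink node_num
 * back down so the slots can be reused.
 */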
486 static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
487 enum htc_endpoint_id eid, u32 map_no)
488 {
489 /* TODO: Find out vif */
490 struct ath6kl_vif *vif = ar->vif;
491 u32 i;
492
493 if (vif->nw_type != ADHOC_NETWORK)
494 return;
495
496 if (!ar->ibss_ps_enable)
497 return;
498
499 if (eid == ar->ctrl_ep)
500 return;
501
502 if (map_no == 0)
503 return;
504
505 map_no--;
506 ar->node_map[map_no].tx_pend--;
507
508 if (ar->node_map[map_no].tx_pend)
509 return;
510
511 if (map_no != (ar->node_num - 1))
512 return;
513
514 for (i = ar->node_num; i > 0; i--) {
515 if (ar->node_map[i - 1].tx_pend)
516 break;
517
518 memset(&ar->node_map[i - 1], 0,
519 sizeof(struct ath6kl_node_mapping));
520 ar->node_num--;
521 }
522 }
523
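/*
 * HTC TX completion handler: reap the completed packets, update the
 * pending counters and per-vif statistics, release the cookies and any
 * IBSS node-map references, restart the netif queue when possible and
 * wake anyone waiting for the WMI control endpoint to drain.
 */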
524 void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
525 {
526 struct ath6kl *ar = context;
527 struct sk_buff_head skb_queue;
528 struct htc_packet *packet;
529 struct sk_buff *skb;
530 struct ath6kl_cookie *ath6kl_cookie;
531 u32 map_no = 0;
532 int status;
533 enum htc_endpoint_id eid;
534 bool wake_event = false;
535 bool flushing = false;
536 /* TODO: Find out vif */
537 struct ath6kl_vif *vif = ar->vif;
538
539 skb_queue_head_init(&skb_queue);
540
541 /* lock the driver as we update internal state */
542 spin_lock_bh(&ar->lock);
543
544 /* reap completed packets */
545 while (!list_empty(packet_queue)) {
546
547 packet = list_first_entry(packet_queue, struct htc_packet,
548 list);
549 list_del(&packet->list);
550
551 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
552 if (!ath6kl_cookie)
553 goto fatal;
554
555 status = packet->status;
556 skb = ath6kl_cookie->skb;
557 eid = packet->endpoint;
558 map_no = ath6kl_cookie->map_no;
559
560 if (!skb || !skb->data)
561 goto fatal;
562
563 packet->buf = skb->data;
564
565 __skb_queue_tail(&skb_queue, skb);
566
567 if (!status && (packet->act_len != skb->len))
568 goto fatal;
569
570 ar->tx_pending[eid]--;
571
572 if (eid != ar->ctrl_ep)
573 ar->total_tx_data_pend--;
574
575 if (eid == ar->ctrl_ep) {
576 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
577 clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
578
579 if (ar->tx_pending[eid] == 0)
580 wake_event = true;
581 }
582
583 if (status) {
584 if (status == -ECANCELED)
585 /* a packet was flushed */
586 flushing = true;
587
588 vif->net_stats.tx_errors++;
589
590 if (status != -ENOSPC)
591 ath6kl_err("tx error, status: 0x%x\n", status);
592 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
593 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
594 __func__, skb, packet->buf, packet->act_len,
595 eid, "error!");
596 } else {
597 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
598 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
599 __func__, skb, packet->buf, packet->act_len,
600 eid, "OK");
601
602 flushing = false;
603 vif->net_stats.tx_packets++;
604 vif->net_stats.tx_bytes += skb->len;
605 }
606
607 ath6kl_tx_clear_node_map(ar, eid, map_no);
608
609 ath6kl_free_cookie(ar, ath6kl_cookie);
610
611 if (test_bit(NETQ_STOPPED, &vif->flags))
612 clear_bit(NETQ_STOPPED, &vif->flags);
613 }
614
615 spin_unlock_bh(&ar->lock);
616
617 __skb_queue_purge(&skb_queue);
618
619 if (test_bit(CONNECTED, &vif->flags)) {
620 if (!flushing)
621 netif_wake_queue(ar->net_dev);
622 }
623
624 if (wake_event)
625 wake_up(&ar->event_wq);
626
627 return;
628
629 fatal:
630 WARN_ON(1);
631 spin_unlock_bh(&ar->lock);
632 return;
633 }
634
635 void ath6kl_tx_data_cleanup(struct ath6kl *ar)
636 {
637 int i;
638
639 /* flush all the data (non-control) streams */
640 for (i = 0; i < WMM_NUM_AC; i++)
641 ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
642 ATH6KL_DATA_PKT_TAG);
643 }
644
645 /* Rx functions */
646
647 static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
648 struct sk_buff *skb)
649 {
650 if (!skb)
651 return;
652
653 skb->dev = dev;
654
655 if (!(skb->dev->flags & IFF_UP)) {
656 dev_kfree_skb(skb);
657 return;
658 }
659
660 skb->protocol = eth_type_trans(skb, skb->dev);
661
662 netif_rx_ni(skb);
663 }
664
665 static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
666 {
667 struct sk_buff *skb;
668
669 while (num) {
670 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
671 if (!skb) {
672 ath6kl_err("netbuf allocation failed\n");
673 return;
674 }
675 skb_queue_tail(q, skb);
676 num--;
677 }
678 }
679
680 static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
681 {
682 struct sk_buff *skb = NULL;
683
684 if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
685 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
686
687 skb = skb_dequeue(&p_aggr->free_q);
688
689 return skb;
690 }
691
692 void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
693 {
694 struct ath6kl *ar = target->dev->ar;
695 struct sk_buff *skb;
696 int rx_buf;
697 int n_buf_refill;
698 struct htc_packet *packet;
699 struct list_head queue;
700
701 n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
702 ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
703
704 if (n_buf_refill <= 0)
705 return;
706
707 INIT_LIST_HEAD(&queue);
708
709 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
710 "%s: providing htc with %d buffers at eid=%d\n",
711 __func__, n_buf_refill, endpoint);
712
713 for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
714 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
715 if (!skb)
716 break;
717
718 packet = (struct htc_packet *) skb->head;
719 if (!IS_ALIGNED((unsigned long) skb->data, 4))
720 skb->data = PTR_ALIGN(skb->data - 4, 4);
721 set_htc_rxpkt_info(packet, skb, skb->data,
722 ATH6KL_BUFFER_SIZE, endpoint);
723 list_add_tail(&packet->list, &queue);
724 }
725
726 if (!list_empty(&queue))
727 ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
728 }
729
730 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
731 {
732 struct htc_packet *packet;
733 struct sk_buff *skb;
734
735 while (count) {
736 skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
737 if (!skb)
738 return;
739
740 packet = (struct htc_packet *) skb->head;
741 if (!IS_ALIGNED((unsigned long) skb->data, 4))
742 skb->data = PTR_ALIGN(skb->data - 4, 4);
743 set_htc_rxpkt_info(packet, skb, skb->data,
744 ATH6KL_AMSDU_BUFFER_SIZE, 0);
745 spin_lock_bh(&ar->lock);
746 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
747 spin_unlock_bh(&ar->lock);
748 count--;
749 }
750 }
751
752 /*
753 * Callback to allocate a receive buffer for a pending packet. We use a
754 * pre-allocated list of buffers of maximum AMSDU size (4K).
755 */
756 struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
757 enum htc_endpoint_id endpoint,
758 int len)
759 {
760 struct ath6kl *ar = target->dev->ar;
761 struct htc_packet *packet = NULL;
762 struct list_head *pkt_pos;
763 int refill_cnt = 0, depth = 0;
764
765 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
766 __func__, endpoint, len);
767
768 if ((len <= ATH6KL_BUFFER_SIZE) ||
769 (len > ATH6KL_AMSDU_BUFFER_SIZE))
770 return NULL;
771
772 spin_lock_bh(&ar->lock);
773
774 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
775 spin_unlock_bh(&ar->lock);
776 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
777 goto refill_buf;
778 }
779
780 packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
781 struct htc_packet, list);
782 list_del(&packet->list);
783 list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
784 depth++;
785
786 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
787 spin_unlock_bh(&ar->lock);
788
789 /* set actual endpoint ID */
790 packet->endpoint = endpoint;
791
792 refill_buf:
793 if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
794 ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
795
796 return packet;
797 }
798
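/*
 * Split a received A-MSDU into its individual 802.3 subframes: each
 * subframe is copied into a buffer from the aggregation free queue,
 * converted from 802.3/LLC to DIX format and appended to the TID's
 * reorder queue. The original A-MSDU skb is freed at the end.
 */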
799 static void aggr_slice_amsdu(struct aggr_info *p_aggr,
800 struct rxtid *rxtid, struct sk_buff *skb)
801 {
802 struct sk_buff *new_skb;
803 struct ethhdr *hdr;
804 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
805 u8 *framep;
806
807 mac_hdr_len = sizeof(struct ethhdr);
808 framep = skb->data + mac_hdr_len;
809 amsdu_len = skb->len - mac_hdr_len;
810
811 while (amsdu_len > mac_hdr_len) {
812 hdr = (struct ethhdr *) framep;
813 payload_8023_len = ntohs(hdr->h_proto);
814
815 if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
816 payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
817 ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
818 payload_8023_len);
819 break;
820 }
821
822 frame_8023_len = payload_8023_len + mac_hdr_len;
823 new_skb = aggr_get_free_skb(p_aggr);
824 if (!new_skb) {
825 ath6kl_err("no buffer available\n");
826 break;
827 }
828
829 memcpy(new_skb->data, framep, frame_8023_len);
830 skb_put(new_skb, frame_8023_len);
831 if (ath6kl_wmi_dot3_2_dix(new_skb)) {
832 ath6kl_err("dot3_2_dix error\n");
833 dev_kfree_skb(new_skb);
834 break;
835 }
836
837 skb_queue_tail(&rxtid->q, new_skb);
838
839 /* Is this the last subframe within this aggregate? */
840 if ((amsdu_len - frame_8023_len) == 0)
841 break;
842
843 /* Add the length of A-MSDU subframe padding bytes -
844 * Round to nearest word.
845 */
846 frame_8023_len = ALIGN(frame_8023_len, 4);
847
848 framep += frame_8023_len;
849 amsdu_len -= frame_8023_len;
850 }
851
852 dev_kfree_skb(skb);
853 }
854
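/*
 * Release in-order frames from the hold queue. Starting at seq_next,
 * walk the reorder window up to seq_no (or the whole window when
 * seq_no is 0), slicing A-MSDUs and moving MPDUs onto the TID queue;
 * with order == 1, stop at the first hole. Everything collected is
 * then handed to the network stack.
 */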
855 static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
856 u16 seq_no, u8 order)
857 {
858 struct sk_buff *skb;
859 struct rxtid *rxtid;
860 struct skb_hold_q *node;
861 u16 idx, idx_end, seq_end;
862 struct rxtid_stats *stats;
863
864 if (!p_aggr)
865 return;
866
867 rxtid = &p_aggr->rx_tid[tid];
868 stats = &p_aggr->stat[tid];
869
870 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
871
872 /*
873 * idx_end is typically the last possible frame in the window,
874 * but changes to the given seq_no when a BAR comes. If seq_no
875 * is non-zero, we will go up to that and stop.
876 * Note: the last seq no in the current window will occupy the
877 * same index position as the index just previous to the start.
878 * An important point: if win_sz is 7, then for a seq_no space
879 * of 4095 there would be holes when the sequence number wraps
880 * around. The target should judiciously choose the win_sz based
881 * on this condition (for 4095, win_sz values of 2, 4, 8 and 16,
882 * with TID_WINDOW_SZ = 2 x win_sz, work fine).
883 * We must dequeue from "idx" to "idx_end", including both.
884 */
885 seq_end = seq_no ? seq_no : rxtid->seq_next;
886 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
887
888 spin_lock_bh(&rxtid->lock);
889
890 do {
891 node = &rxtid->hold_q[idx];
892 if ((order == 1) && (!node->skb))
893 break;
894
895 if (node->skb) {
896 if (node->is_amsdu)
897 aggr_slice_amsdu(p_aggr, rxtid, node->skb);
898 else
899 skb_queue_tail(&rxtid->q, node->skb);
900 node->skb = NULL;
901 } else
902 stats->num_hole++;
903
904 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
905 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
906 } while (idx != idx_end);
907
908 spin_unlock_bh(&rxtid->lock);
909
910 stats->num_delivered += skb_queue_len(&rxtid->q);
911
912 while ((skb = skb_dequeue(&rxtid->q)))
913 ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
914 }
915
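/*
 * Feed one received frame into the RX reorder logic for a TID. When
 * aggregation is not active the frame is only handled if it is an
 * A-MSDU; otherwise the sequence number is checked against the current
 * window, the window is advanced if the frame lies outside it, the
 * frame is parked in the hold queue slot for its sequence number and
 * any newly in-order frames are delivered. Returns true when the skb
 * has been consumed by the aggregation code.
 */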
916 static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
917 u16 seq_no,
918 bool is_amsdu, struct sk_buff *frame)
919 {
920 struct rxtid *rxtid;
921 struct rxtid_stats *stats;
922 struct sk_buff *skb;
923 struct skb_hold_q *node;
924 u16 idx, st, cur, end;
925 bool is_queued = false;
926 u16 extended_end;
927
928 rxtid = &agg_info->rx_tid[tid];
929 stats = &agg_info->stat[tid];
930
931 stats->num_into_aggr++;
932
933 if (!rxtid->aggr) {
934 if (is_amsdu) {
935 aggr_slice_amsdu(agg_info, rxtid, frame);
936 is_queued = true;
937 stats->num_amsdu++;
938 while ((skb = skb_dequeue(&rxtid->q)))
939 ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
940 skb);
941 }
942 return is_queued;
943 }
944
945 /* Check whether the incoming sequence no is within the window */
946 st = rxtid->seq_next;
947 cur = seq_no;
948 end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
949
950 if (((st < end) && (cur < st || cur > end)) ||
951 ((st > end) && (cur > end) && (cur < st))) {
952 extended_end = (end + rxtid->hold_q_sz - 1) &
953 ATH6KL_MAX_SEQ_NO;
954
955 if (((end < extended_end) &&
956 (cur < end || cur > extended_end)) ||
957 ((end > extended_end) && (cur > extended_end) &&
958 (cur < end))) {
959 aggr_deque_frms(agg_info, tid, 0, 0);
960 if (cur >= rxtid->hold_q_sz - 1)
961 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
962 else
963 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
964 (rxtid->hold_q_sz - 2 - cur);
965 } else {
966 /*
967 * Dequeue only those frames that are outside the
968 * new shifted window.
969 */
970 if (cur >= rxtid->hold_q_sz - 1)
971 st = cur - (rxtid->hold_q_sz - 1);
972 else
973 st = ATH6KL_MAX_SEQ_NO -
974 (rxtid->hold_q_sz - 2 - cur);
975
976 aggr_deque_frms(agg_info, tid, st, 0);
977 }
978
979 stats->num_oow++;
980 }
981
982 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
983
984 node = &rxtid->hold_q[idx];
985
986 spin_lock_bh(&rxtid->lock);
987
988 /*
989 * Is the current frame a duplicate or something beyond our window
990 * (hold_q, which is already 2x the window size)?
991 *
992 * 1. Duplicate is easy - drop the incoming frame.
993 * 2. Not falling in the current sliding window:
994 * 2a. Is the frame_seq_no preceding the current tid_seq_no?
995 * -> drop the frame; perhaps the sender did not get our ACK.
996 * This is taken care of above.
997 * 2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
998 * -> Taken care of above, by moving the window forward.
999 */
1000 dev_kfree_skb(node->skb);
1001 stats->num_dups++;
1002
1003 node->skb = frame;
1004 is_queued = true;
1005 node->is_amsdu = is_amsdu;
1006 node->seq_no = seq_no;
1007
1008 if (node->is_amsdu)
1009 stats->num_amsdu++;
1010 else
1011 stats->num_mpdu++;
1012
1013 spin_unlock_bh(&rxtid->lock);
1014
1015 aggr_deque_frms(agg_info, tid, 0, 1);
1016
1017 if (agg_info->timer_scheduled)
1018 rxtid->progress = true;
1019 else
1020 for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
1021 if (rxtid->hold_q[idx].skb) {
1022 /*
1023 * There is a frame in the queue and no
1024 * timer so start a timer to ensure that
1025 * the frame doesn't remain stuck
1026 * forever.
1027 */
1028 agg_info->timer_scheduled = true;
1029 mod_timer(&agg_info->timer,
1030 (jiffies +
1031 HZ * (AGGR_RX_TIMEOUT) / 1000));
1032 rxtid->progress = false;
1033 rxtid->timer_mon = true;
1034 break;
1035 }
1036 }
1037
1038 return is_queued;
1039 }
1040
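/*
 * HTC RX completion handler for control and data endpoints. Control
 * frames are passed to the WMI layer; data frames have their WMI data
 * header and meta info stripped, AP-mode power-save state is updated,
 * AP-mode frames may be reflected back on the air (intra-BSS), and
 * unicast frames are run through the RX reorder logic before being
 * delivered to the network stack.
 */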
1041 void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1042 {
1043 struct ath6kl *ar = target->dev->ar;
1044 struct sk_buff *skb = packet->pkt_cntxt;
1045 struct wmi_rx_meta_v2 *meta;
1046 struct wmi_data_hdr *dhdr;
1047 int min_hdr_len;
1048 u8 meta_type, dot11_hdr = 0;
1049 int status = packet->status;
1050 enum htc_endpoint_id ept = packet->endpoint;
1051 bool is_amsdu, prev_ps, ps_state = false;
1052 struct ath6kl_sta *conn = NULL;
1053 struct sk_buff *skb1 = NULL;
1054 struct ethhdr *datap = NULL;
1055 /* TODO: Find out vif */
1056 struct ath6kl_vif *vif = ar->vif;
1057 u16 seq_no, offset;
1058 u8 tid;
1059
1060 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1061 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1062 __func__, ar, ept, skb, packet->buf,
1063 packet->act_len, status);
1064
1065 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1066 vif->net_stats.rx_errors++;
1067 dev_kfree_skb(skb);
1068 return;
1069 }
1070
1071 /*
1072 * Take lock to protect buffer counts and adaptive power throughput
1073 * state.
1074 */
1075 spin_lock_bh(&ar->lock);
1076
1077 vif->net_stats.rx_packets++;
1078 vif->net_stats.rx_bytes += packet->act_len;
1079
1080 spin_unlock_bh(&ar->lock);
1081
1082 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1083 skb_pull(skb, HTC_HDR_LENGTH);
1084
1085 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1086 skb->data, skb->len);
1087
1088 skb->dev = ar->net_dev;
1089
1090 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1091 if (EPPING_ALIGNMENT_PAD > 0)
1092 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1093 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1094 return;
1095 }
1096
1097 if (ept == ar->ctrl_ep) {
1098 ath6kl_wmi_control_rx(ar->wmi, skb);
1099 return;
1100 }
1101
1102 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1103 sizeof(struct ath6kl_llc_snap_hdr);
1104
1105 dhdr = (struct wmi_data_hdr *) skb->data;
1106
1107 /*
1108 * In the case of AP mode we may receive NULL data frames
1109 * that do not have LLC hdr. They are 16 bytes in size.
1110 * Allow these frames in the AP mode.
1111 */
1112 if (vif->nw_type != AP_NETWORK &&
1113 ((packet->act_len < min_hdr_len) ||
1114 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1115 ath6kl_info("frame len is too short or too long\n");
1116 vif->net_stats.rx_errors++;
1117 vif->net_stats.rx_length_errors++;
1118 dev_kfree_skb(skb);
1119 return;
1120 }
1121
1122 /* Get the Power save state of the STA */
1123 if (vif->nw_type == AP_NETWORK) {
1124 meta_type = wmi_data_hdr_get_meta(dhdr);
1125
1126 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1127 WMI_DATA_HDR_PS_MASK);
1128
1129 offset = sizeof(struct wmi_data_hdr);
1130
1131 switch (meta_type) {
1132 case 0:
1133 break;
1134 case WMI_META_VERSION_1:
1135 offset += sizeof(struct wmi_rx_meta_v1);
1136 break;
1137 case WMI_META_VERSION_2:
1138 offset += sizeof(struct wmi_rx_meta_v2);
1139 break;
1140 default:
1141 break;
1142 }
1143
1144 datap = (struct ethhdr *) (skb->data + offset);
1145 conn = ath6kl_find_sta(ar, datap->h_source);
1146
1147 if (!conn) {
1148 dev_kfree_skb(skb);
1149 return;
1150 }
1151
1152 /*
1153 * If there is a change in PS state of the STA,
1154 * take appropriate steps:
1155 *
1156 * 1. If Sleep-->Awake, flush the psq for the STA and
1157 * clear the PVB for the STA.
1158 * 2. If Awake-->Sleep, start queueing frames for
1159 * the STA.
1160 */
1161 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1162
1163 if (ps_state)
1164 conn->sta_flags |= STA_PS_SLEEP;
1165 else
1166 conn->sta_flags &= ~STA_PS_SLEEP;
1167
1168 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1169 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1170 struct sk_buff *skbuff = NULL;
1171
1172 spin_lock_bh(&conn->psq_lock);
1173 while ((skbuff = skb_dequeue(&conn->psq))
1174 != NULL) {
1175 spin_unlock_bh(&conn->psq_lock);
1176 ath6kl_data_tx(skbuff, ar->net_dev);
1177 spin_lock_bh(&conn->psq_lock);
1178 }
1179 spin_unlock_bh(&conn->psq_lock);
1180 /* Clear the PVB for this STA */
1181 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1182 conn->aid, 0);
1183 }
1184 }
1185
1186 /* drop NULL data frames here */
1187 if ((packet->act_len < min_hdr_len) ||
1188 (packet->act_len >
1189 WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1190 dev_kfree_skb(skb);
1191 return;
1192 }
1193 }
1194
1195 is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1196 tid = wmi_data_hdr_get_up(dhdr);
1197 seq_no = wmi_data_hdr_get_seqno(dhdr);
1198 meta_type = wmi_data_hdr_get_meta(dhdr);
1199 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1200 skb_pull(skb, sizeof(struct wmi_data_hdr));
1201
1202 switch (meta_type) {
1203 case WMI_META_VERSION_1:
1204 skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1205 break;
1206 case WMI_META_VERSION_2:
1207 meta = (struct wmi_rx_meta_v2 *) skb->data;
1208 if (meta->csum_flags & 0x1) {
1209 skb->ip_summed = CHECKSUM_COMPLETE;
1210 skb->csum = (__force __wsum) meta->csum;
1211 }
1212 skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1213 break;
1214 default:
1215 break;
1216 }
1217
1218 if (dot11_hdr)
1219 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1220 else if (!is_amsdu)
1221 status = ath6kl_wmi_dot3_2_dix(skb);
1222
1223 if (status) {
1224 /*
1225 * Drop frames that could not be processed (lack of
1226 * memory, etc.)
1227 */
1228 dev_kfree_skb(skb);
1229 return;
1230 }
1231
1232 if (!(ar->net_dev->flags & IFF_UP)) {
1233 dev_kfree_skb(skb);
1234 return;
1235 }
1236
1237 if (vif->nw_type == AP_NETWORK) {
1238 datap = (struct ethhdr *) skb->data;
1239 if (is_multicast_ether_addr(datap->h_dest))
1240 /*
1241 * Bcast/Mcast frames should be sent to the
1242 * OS stack as well as on the air.
1243 */
1244 skb1 = skb_copy(skb, GFP_ATOMIC);
1245 else {
1246 /*
1247 * Search for a connected STA with dstMac
1248 * as the MAC address. If found, send the
1249 * frame to it on the air; else send the
1250 * frame up the stack.
1251 */
1252 conn = ath6kl_find_sta(ar, datap->h_dest);
1253
1254 if (conn && ar->intra_bss) {
1255 skb1 = skb;
1256 skb = NULL;
1257 } else if (conn && !ar->intra_bss) {
1258 dev_kfree_skb(skb);
1259 skb = NULL;
1260 }
1261 }
1262 if (skb1)
1263 ath6kl_data_tx(skb1, ar->net_dev);
1264
1265 if (skb == NULL) {
1266 /* nothing to deliver up the stack */
1267 return;
1268 }
1269 }
1270
1271 datap = (struct ethhdr *) skb->data;
1272
1273 if (is_unicast_ether_addr(datap->h_dest) &&
1274 aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
1275 is_amsdu, skb))
1276 /* aggregation code will handle the skb */
1277 return;
1278
1279 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1280 }
1281
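/*
 * Reorder-timer callback: flush any TID whose hold queue has stalled
 * (frames present but no progress since the timer was armed), then
 * re-arm the timer if frames are still waiting in any hold queue.
 */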
1282 static void aggr_timeout(unsigned long arg)
1283 {
1284 u8 i, j;
1285 struct aggr_info *p_aggr = (struct aggr_info *) arg;
1286 struct rxtid *rxtid;
1287 struct rxtid_stats *stats;
1288
1289 for (i = 0; i < NUM_OF_TIDS; i++) {
1290 rxtid = &p_aggr->rx_tid[i];
1291 stats = &p_aggr->stat[i];
1292
1293 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1294 continue;
1295
1296 stats->num_timeouts++;
1297 ath6kl_dbg(ATH6KL_DBG_AGGR,
1298 "aggr timeout (st %d end %d)\n",
1299 rxtid->seq_next,
1300 ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1301 ATH6KL_MAX_SEQ_NO));
1302 aggr_deque_frms(p_aggr, i, 0, 0);
1303 }
1304
1305 p_aggr->timer_scheduled = false;
1306
1307 for (i = 0; i < NUM_OF_TIDS; i++) {
1308 rxtid = &p_aggr->rx_tid[i];
1309
1310 if (rxtid->aggr && rxtid->hold_q) {
1311 for (j = 0; j < rxtid->hold_q_sz; j++) {
1312 if (rxtid->hold_q[j].skb) {
1313 p_aggr->timer_scheduled = true;
1314 rxtid->timer_mon = true;
1315 rxtid->progress = false;
1316 break;
1317 }
1318 }
1319
1320 if (j >= rxtid->hold_q_sz)
1321 rxtid->timer_mon = false;
1322 }
1323 }
1324
1325 if (p_aggr->timer_scheduled)
1326 mod_timer(&p_aggr->timer,
1327 jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1328 }
1329
1330 static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1331 {
1332 struct rxtid *rxtid;
1333 struct rxtid_stats *stats;
1334
1335 if (!p_aggr || tid >= NUM_OF_TIDS)
1336 return;
1337
1338 rxtid = &p_aggr->rx_tid[tid];
1339 stats = &p_aggr->stat[tid];
1340
1341 if (rxtid->aggr)
1342 aggr_deque_frms(p_aggr, tid, 0, 0);
1343
1344 rxtid->aggr = false;
1345 rxtid->progress = false;
1346 rxtid->timer_mon = false;
1347 rxtid->win_sz = 0;
1348 rxtid->seq_next = 0;
1349 rxtid->hold_q_sz = 0;
1350
1351 kfree(rxtid->hold_q);
1352 rxtid->hold_q = NULL;
1353
1354 memset(stats, 0, sizeof(struct rxtid_stats));
1355 }
1356
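/*
 * Handle an ADDBA request event from the target: (re)initialise the
 * per-TID reorder state, allocate a hold queue of TID_WINDOW_SZ(win_sz)
 * entries and start the receive window at seq_no.
 */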
1357 void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
1358 {
1359 /* TODO: Find out vif */
1360 struct ath6kl_vif *vif = ar->vif;
1361 struct aggr_info *p_aggr = vif->aggr_cntxt;
1362 struct rxtid *rxtid;
1363 struct rxtid_stats *stats;
1364 u16 hold_q_size;
1365
1366 if (!p_aggr)
1367 return;
1368
1369 rxtid = &p_aggr->rx_tid[tid];
1370 stats = &p_aggr->stat[tid];
1371
1372 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1373 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1374 __func__, win_sz, tid);
1375
1376 if (rxtid->aggr)
1377 aggr_delete_tid_state(p_aggr, tid);
1378
1379 rxtid->seq_next = seq_no;
1380 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1381 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1382 if (!rxtid->hold_q)
1383 return;
1384
1385 rxtid->win_sz = win_sz;
1386 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1387 if (!skb_queue_empty(&rxtid->q))
1388 return;
1389
1390 rxtid->aggr = true;
1391 }
1392
1393 struct aggr_info *aggr_init(struct net_device *dev)
1394 {
1395 struct aggr_info *p_aggr = NULL;
1396 struct rxtid *rxtid;
1397 u8 i;
1398
1399 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1400 if (!p_aggr) {
1401 ath6kl_err("failed to alloc memory for aggr_node\n");
1402 return NULL;
1403 }
1404
1405 p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
1406 p_aggr->dev = dev;
1407 init_timer(&p_aggr->timer);
1408 p_aggr->timer.function = aggr_timeout;
1409 p_aggr->timer.data = (unsigned long) p_aggr;
1410
1411 p_aggr->timer_scheduled = false;
1412 skb_queue_head_init(&p_aggr->free_q);
1413
1414 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
1415
1416 for (i = 0; i < NUM_OF_TIDS; i++) {
1417 rxtid = &p_aggr->rx_tid[i];
1418 rxtid->aggr = false;
1419 rxtid->progress = false;
1420 rxtid->timer_mon = false;
1421 skb_queue_head_init(&rxtid->q);
1422 spin_lock_init(&rxtid->lock);
1423 }
1424
1425 return p_aggr;
1426 }
1427
1428 void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
1429 {
1430 /* TODO: Find out vif */
1431 struct ath6kl_vif *vif = ar->vif;
1432 struct aggr_info *p_aggr = vif->aggr_cntxt;
1433 struct rxtid *rxtid;
1434
1435 if (!p_aggr)
1436 return;
1437
1438 rxtid = &p_aggr->rx_tid[tid];
1439
1440 if (rxtid->aggr)
1441 aggr_delete_tid_state(p_aggr, tid);
1442 }
1443
1444 void aggr_reset_state(struct aggr_info *aggr_info)
1445 {
1446 u8 tid;
1447
1448 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1449 aggr_delete_tid_state(aggr_info, tid);
1450 }
1451
1452 /* clean up our amsdu buffer list */
1453 void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1454 {
1455 struct htc_packet *packet, *tmp_pkt;
1456
1457 spin_lock_bh(&ar->lock);
1458 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1459 spin_unlock_bh(&ar->lock);
1460 return;
1461 }
1462
1463 list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1464 list) {
1465 list_del(&packet->list);
1466 spin_unlock_bh(&ar->lock);
1467 dev_kfree_skb(packet->pkt_cntxt);
1468 spin_lock_bh(&ar->lock);
1469 }
1470
1471 spin_unlock_bh(&ar->lock);
1472 }
1473
1474 void aggr_module_destroy(struct aggr_info *aggr_info)
1475 {
1476 struct rxtid *rxtid;
1477 u8 i, k;
1478
1479 if (!aggr_info)
1480 return;
1481
1482 if (aggr_info->timer_scheduled) {
1483 del_timer(&aggr_info->timer);
1484 aggr_info->timer_scheduled = false;
1485 }
1486
1487 for (i = 0; i < NUM_OF_TIDS; i++) {
1488 rxtid = &aggr_info->rx_tid[i];
1489 if (rxtid->hold_q) {
1490 for (k = 0; k < rxtid->hold_q_sz; k++)
1491 dev_kfree_skb(rxtid->hold_q[k].skb);
1492 kfree(rxtid->hold_q);
1493 }
1494
1495 skb_queue_purge(&rxtid->q);
1496 }
1497
1498 skb_queue_purge(&aggr_info->free_q);
1499 kfree(aggr_info);
1500 }