drivers/net/wireless/ath/ath6kl/txrx.c
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include "core.h"
21#include "debug.h"
22#include "htc-ops.h"
23
24/*
25 * tid - tid_mux0..tid_mux3
26 * aid - tid_mux4..tid_mux7
27 */
28#define ATH6KL_TID_MASK 0xf
29#define ATH6KL_AID_SHIFT 4
30
31static inline u8 ath6kl_get_tid(u8 tid_mux)
32{
33 return tid_mux & ATH6KL_TID_MASK;
34}
35
36static inline u8 ath6kl_get_aid(u8 tid_mux)
37{
38 return tid_mux >> ATH6KL_AID_SHIFT;
39}
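/*
 * Worked example of the mux encoding above (illustrative values): a
 * tid_mux of 0x25 carries tid 5 in the low nibble and aid 2 in the
 * upper nibble, so ath6kl_get_tid(0x25) returns 5 and
 * ath6kl_get_aid(0x25) returns 2.
 */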
40
41static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
42 u32 *map_no)
43{
44 struct ath6kl *ar = ath6kl_priv(dev);
45 struct ethhdr *eth_hdr;
46 u32 i, ep_map = -1;
47 u8 *datap;
48
49 *map_no = 0;
50 datap = skb->data;
51 eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
52
53 if (is_multicast_ether_addr(eth_hdr->h_dest))
54 return ENDPOINT_2;
55
56 for (i = 0; i < ar->node_num; i++) {
57 if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
58 ETH_ALEN) == 0) {
59 *map_no = i + 1;
60 ar->node_map[i].tx_pend++;
61 return ar->node_map[i].ep_id;
62 }
63
64 if ((ep_map == -1) && !ar->node_map[i].tx_pend)
65 ep_map = i;
66 }
67
68 if (ep_map == -1) {
69 ep_map = ar->node_num;
70 ar->node_num++;
71 if (ar->node_num > MAX_NODE_NUM)
72 return ENDPOINT_UNUSED;
73 }
74
75 memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
76
77 for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
78 if (!ar->tx_pending[i]) {
79 ar->node_map[ep_map].ep_id = i;
80 break;
81 }
82
83 /*
84 * No free endpoint is available, start redistribution on
85 * the inuse endpoints.
86 */
87 if (i == ENDPOINT_5) {
88 ar->node_map[ep_map].ep_id = ar->next_ep_id;
89 ar->next_ep_id++;
90 if (ar->next_ep_id > ENDPOINT_5)
91 ar->next_ep_id = ENDPOINT_2;
92 }
93 }
94
95 *map_no = ep_map + 1;
96 ar->node_map[ep_map].tx_pend++;
97
98 return ar->node_map[ep_map].ep_id;
99}
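/*
 * Summary of the mapping above (descriptive only): a unicast peer already
 * present in node_map reuses its endpoint and bumps tx_pend; a new peer is
 * given the first of ENDPOINT_2..ENDPOINT_5 that has no pending tx, and
 * once all four data endpoints are busy, next_ep_id hands out endpoints
 * round-robin over ENDPOINT_2..ENDPOINT_5.
 */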
100
101static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
102 struct ath6kl_vif *vif,
103 struct sk_buff *skb,
104 u32 *flags)
105{
106 struct ath6kl *ar = vif->ar;
107 bool is_apsdq_empty = false;
108 struct ethhdr *datap = (struct ethhdr *) skb->data;
109 u8 up = 0, traffic_class, *ip_hdr;
110 u16 ether_type;
111 struct ath6kl_llc_snap_hdr *llc_hdr;
112
113 if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
114 /*
115 * This tx is because of a uAPSD trigger, determine
116 * more and EOSP bit. Set EOSP if queue is empty
117 * or sufficient frames are delivered for this trigger.
118 */
119 spin_lock_bh(&conn->psq_lock);
120 if (!skb_queue_empty(&conn->apsdq))
121 *flags |= WMI_DATA_HDR_FLAGS_MORE;
122 else if (conn->sta_flags & STA_PS_APSD_EOSP)
123 *flags |= WMI_DATA_HDR_FLAGS_EOSP;
124 *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
125 spin_unlock_bh(&conn->psq_lock);
126 return false;
127 } else if (!conn->apsd_info)
128 return false;
129
130 if (test_bit(WMM_ENABLED, &vif->flags)) {
131 ether_type = be16_to_cpu(datap->h_proto);
132 if (is_ethertype(ether_type)) {
133 /* packet is in DIX format */
134 ip_hdr = (u8 *)(datap + 1);
135 } else {
136 /* packet is in 802.3 format */
137 llc_hdr = (struct ath6kl_llc_snap_hdr *)
138 (datap + 1);
139 ether_type = be16_to_cpu(llc_hdr->eth_type);
140 ip_hdr = (u8 *)(llc_hdr + 1);
141 }
142
143 if (ether_type == IP_ETHERTYPE)
144 up = ath6kl_wmi_determine_user_priority(
145 ip_hdr, 0);
146 }
147
148 traffic_class = ath6kl_wmi_get_traffic_class(up);
149
150 if ((conn->apsd_info & (1 << traffic_class)) == 0)
151 return false;
152
153 /* Queue the frames if the STA is sleeping */
154 spin_lock_bh(&conn->psq_lock);
155 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
156 skb_queue_tail(&conn->apsdq, skb);
157 spin_unlock_bh(&conn->psq_lock);
158
159 /*
160 * If this is the first pkt getting queued
161 * for this STA, update the PVB for this STA
162 */
163 if (is_apsdq_empty) {
164 ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
165 vif->fw_vif_idx,
166 conn->aid, 1, 0);
167 }
168 *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
169
170 return true;
171}
172
173static bool ath6kl_process_psq(struct ath6kl_sta *conn,
174 struct ath6kl_vif *vif,
175 struct sk_buff *skb,
176 u32 *flags)
177{
178 bool is_psq_empty = false;
179 struct ath6kl *ar = vif->ar;
180
181 if (conn->sta_flags & STA_PS_POLLED) {
182 spin_lock_bh(&conn->psq_lock);
183 if (!skb_queue_empty(&conn->psq))
184 *flags |= WMI_DATA_HDR_FLAGS_MORE;
185 spin_unlock_bh(&conn->psq_lock);
186 return false;
187 }
188
189 /* Queue the frames if the STA is sleeping */
190 spin_lock_bh(&conn->psq_lock);
191 is_psq_empty = skb_queue_empty(&conn->psq);
192 skb_queue_tail(&conn->psq, skb);
193 spin_unlock_bh(&conn->psq_lock);
194
195 /*
196 * If this is the first pkt getting queued
197 * for this STA, update the PVB for this
198 * STA.
199 */
200 if (is_psq_empty)
201 ath6kl_wmi_set_pvb_cmd(ar->wmi,
202 vif->fw_vif_idx,
203 conn->aid, 1);
204 return true;
205}
206
207static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
208 u32 *flags)
209{
210 struct ethhdr *datap = (struct ethhdr *) skb->data;
211 struct ath6kl_sta *conn = NULL;
212 bool ps_queued = false;
213 struct ath6kl *ar = vif->ar;
214
215 if (is_multicast_ether_addr(datap->h_dest)) {
216 u8 ctr = 0;
217 bool q_mcast = false;
218
219 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
220 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
221 q_mcast = true;
222 break;
223 }
224 }
225
226 if (q_mcast) {
227 /*
228 * If this transmit is not because of a Dtim Expiry
229 * q it.
230 */
231 if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
232 bool is_mcastq_empty = false;
233
234 spin_lock_bh(&ar->mcastpsq_lock);
235 is_mcastq_empty =
236 skb_queue_empty(&ar->mcastpsq);
237 skb_queue_tail(&ar->mcastpsq, skb);
238 spin_unlock_bh(&ar->mcastpsq_lock);
239
240 /*
241 * If this is the first Mcast pkt getting
242 * queued indicate to the target to set the
243 * BitmapControl LSB of the TIM IE.
244 */
245 if (is_mcastq_empty)
246 ath6kl_wmi_set_pvb_cmd(ar->wmi,
247 vif->fw_vif_idx,
248 MCAST_AID, 1);
249
250 ps_queued = true;
251 } else {
252 /*
253 * This transmit is because of Dtim expiry.
254 * Determine if MoreData bit has to be set.
255 */
256 spin_lock_bh(&ar->mcastpsq_lock);
257 if (!skb_queue_empty(&ar->mcastpsq))
258 *flags |= WMI_DATA_HDR_FLAGS_MORE;
259 spin_unlock_bh(&ar->mcastpsq_lock);
260 }
261 }
262 } else {
263 conn = ath6kl_find_sta(vif, datap->h_dest);
264 if (!conn) {
265 dev_kfree_skb(skb);
266
267 /* Inform the caller that the skb is consumed */
268 return true;
269 }
270
271 if (conn->sta_flags & STA_PS_SLEEP) {
272 ps_queued = ath6kl_process_uapsdq(conn,
273 vif, skb, flags);
274 if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
275 ps_queued = ath6kl_process_psq(conn,
276 vif, skb, flags);
277 }
278 }
279 return ps_queued;
280}
281
282/* Tx functions */
283
284int ath6kl_control_tx(void *devt, struct sk_buff *skb,
285 enum htc_endpoint_id eid)
286{
287 struct ath6kl *ar = devt;
288 int status = 0;
289 struct ath6kl_cookie *cookie = NULL;
290
291 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
292 return -EACCES;
293
294 spin_lock_bh(&ar->lock);
295
296 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
297 "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
298 skb, skb->len, eid);
299
300 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
301 /*
302 * Control endpoint is full, don't allocate resources, we
303 * are just going to drop this packet.
304 */
305 cookie = NULL;
306 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
307 skb, skb->len);
308 } else
309 cookie = ath6kl_alloc_cookie(ar);
310
311 if (cookie == NULL) {
312 spin_unlock_bh(&ar->lock);
313 status = -ENOMEM;
314 goto fail_ctrl_tx;
315 }
316
317 ar->tx_pending[eid]++;
318
319 if (eid != ar->ctrl_ep)
320 ar->total_tx_data_pend++;
321
322 spin_unlock_bh(&ar->lock);
323
324 cookie->skb = skb;
325 cookie->map_no = 0;
326 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
327 eid, ATH6KL_CONTROL_PKT_TAG);
328 cookie->htc_pkt.skb = skb;
329
330 /*
331 * This interface is asynchronous, if there is an error, cleanup
332 * will happen in the TX completion callback.
333 */
334 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
335
336 return 0;
337
338fail_ctrl_tx:
339 dev_kfree_skb(skb);
340 return status;
341}
342
343int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
344{
345 struct ath6kl *ar = ath6kl_priv(dev);
346 struct ath6kl_cookie *cookie = NULL;
347 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
348 struct ath6kl_vif *vif = netdev_priv(dev);
349 u32 map_no = 0;
350 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
351 u8 ac = 99 ; /* initialize to unmapped ac */
352 bool chk_adhoc_ps_mapping = false;
353 int ret;
354 struct wmi_tx_meta_v2 meta_v2;
355 void *meta;
356 u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
357 u8 meta_ver = 0;
358 u32 flags = 0;
359
360 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
361 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
362 skb, skb->data, skb->len);
363
364 /* If target is not associated */
365 if (!test_bit(CONNECTED, &vif->flags)) {
366 dev_kfree_skb(skb);
367 return 0;
368 }
369
370 if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
371 dev_kfree_skb(skb);
372 return 0;
373 }
374
375 if (!test_bit(WMI_READY, &ar->flag))
376 goto fail_tx;
377
378 /* AP mode Power saving processing */
379 if (vif->nw_type == AP_NETWORK) {
380 if (ath6kl_powersave_ap(vif, skb, &flags))
381 return 0;
382 }
383
384 if (test_bit(WMI_ENABLED, &ar->flag)) {
385 if ((dev->features & NETIF_F_IP_CSUM) &&
386 (csum == CHECKSUM_PARTIAL)) {
387 csum_start = skb->csum_start -
388 (skb_network_header(skb) - skb->head) +
389 sizeof(struct ath6kl_llc_snap_hdr);
390 csum_dest = skb->csum_offset + csum_start;
391 }
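/*
 * Note on the offsets computed above: skb->csum_start is measured from
 * skb->head, so subtracting the network header offset rebases it to the
 * start of the IP header, and sizeof(struct ath6kl_llc_snap_hdr) is added
 * because the DIX-to-802.3 conversion below inserts an LLC/SNAP header of
 * that size before the frame is handed to the target for checksum offload.
 */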
392
393 if (skb_headroom(skb) < dev->needed_headroom) {
394 struct sk_buff *tmp_skb = skb;
395
396 skb = skb_realloc_headroom(skb, dev->needed_headroom);
397 kfree_skb(tmp_skb);
398 if (skb == NULL) {
399 vif->net_stats.tx_dropped++;
400 return 0;
401 }
402 }
403
404 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
405 ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
406 goto fail_tx;
407 }
408
409 if ((dev->features & NETIF_F_IP_CSUM) &&
410 (csum == CHECKSUM_PARTIAL)) {
411 meta_v2.csum_start = csum_start;
412 meta_v2.csum_dest = csum_dest;
413
414 /* instruct target to calculate checksum */
415 meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
416 meta_ver = WMI_META_VERSION_2;
417 meta = &meta_v2;
418 } else {
419 meta_ver = 0;
420 meta = NULL;
421 }
422
423 ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
424 DATA_MSGTYPE, flags, 0,
425 meta_ver,
426 meta, vif->fw_vif_idx);
427
428 if (ret) {
429 ath6kl_warn("failed to add wmi data header:%d\n"
430 , ret);
431 goto fail_tx;
432 }
433
434 if ((vif->nw_type == ADHOC_NETWORK) &&
435 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
436 chk_adhoc_ps_mapping = true;
437 else {
438 /* get the stream mapping */
439 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
440 vif->fw_vif_idx, skb,
441 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
442 if (ret)
443 goto fail_tx;
444 }
445 } else
446 goto fail_tx;
447
448 spin_lock_bh(&ar->lock);
449
450 if (chk_adhoc_ps_mapping)
451 eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
452 else
453 eid = ar->ac2ep_map[ac];
454
455 if (eid == 0 || eid == ENDPOINT_UNUSED) {
456 ath6kl_err("eid %d is not mapped!\n", eid);
457 spin_unlock_bh(&ar->lock);
458 goto fail_tx;
459 }
460
461 /* allocate resource for this packet */
462 cookie = ath6kl_alloc_cookie(ar);
463
464 if (!cookie) {
465 spin_unlock_bh(&ar->lock);
466 goto fail_tx;
467 }
468
469 /* update counts while the lock is held */
470 ar->tx_pending[eid]++;
471 ar->total_tx_data_pend++;
472
473 spin_unlock_bh(&ar->lock);
474
475 if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
476 skb_cloned(skb)) {
477 /*
478 * We will touch (move the buffer data to align it. Since the
479 * skb buffer is cloned and not only the header is changed, we
480 * have to copy it to allow the changes. Since we are copying
481 * the data here, we may as well align it by reserving suitable
482 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
483 */
484 struct sk_buff *nskb;
485
486 nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
487 if (nskb == NULL)
488 goto fail_tx;
489 kfree_skb(skb);
490 skb = nskb;
491 }
492
493 cookie->skb = skb;
494 cookie->map_no = map_no;
495 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
496 eid, htc_tag);
497 cookie->htc_pkt.skb = skb;
498
499 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
500 skb->data, skb->len);
501
502 /*
503 * HTC interface is asynchronous, if this fails, cleanup will
504 * happen in the ath6kl_tx_complete callback.
505 */
506 ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
507
508 return 0;
509
510fail_tx:
511 dev_kfree_skb(skb);
512
513 vif->net_stats.tx_dropped++;
514 vif->net_stats.tx_aborted_errors++;
515
516 return 0;
517}
518
519/* indicate tx activity or inactivity on a WMI stream */
520void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
521{
522 struct ath6kl *ar = devt;
523 enum htc_endpoint_id eid;
524 int i;
525
526 eid = ar->ac2ep_map[traffic_class];
527
528 if (!test_bit(WMI_ENABLED, &ar->flag))
529 goto notify_htc;
530
531 spin_lock_bh(&ar->lock);
532
533 ar->ac_stream_active[traffic_class] = active;
534
535 if (active) {
536 /*
537 * Keep track of the active stream with the highest
538 * priority.
539 */
540 if (ar->ac_stream_pri_map[traffic_class] >
541 ar->hiac_stream_active_pri)
542 /* set the new highest active priority */
543 ar->hiac_stream_active_pri =
544 ar->ac_stream_pri_map[traffic_class];
545
546 } else {
547 /*
548 * We may have to search for the next active stream
549 * that is the highest priority.
550 */
551 if (ar->hiac_stream_active_pri ==
552 ar->ac_stream_pri_map[traffic_class]) {
553 /*
554 * The highest priority stream just went inactive
555 * reset and search for the "next" highest "active"
556 * priority stream.
557 */
558 ar->hiac_stream_active_pri = 0;
559
560 for (i = 0; i < WMM_NUM_AC; i++) {
561 if (ar->ac_stream_active[i] &&
562 (ar->ac_stream_pri_map[i] >
563 ar->hiac_stream_active_pri))
564 /*
565 * Set the new highest active
566 * priority.
567 */
568 ar->hiac_stream_active_pri =
569 ar->ac_stream_pri_map[i];
570 }
571 }
572 }
573
574 spin_unlock_bh(&ar->lock);
575
576notify_htc:
577 /* notify HTC, this may cause credit distribution changes */
578 ath6kl_htc_activity_changed(ar->htc_target, eid, active);
579}
580
581enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
582 struct htc_packet *packet)
583{
584 struct ath6kl *ar = target->dev->ar;
585 struct ath6kl_vif *vif;
586 enum htc_endpoint_id endpoint = packet->endpoint;
587 enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
588
589 if (endpoint == ar->ctrl_ep) {
590 /*
591 * Under normal WMI if this is getting full, then something
592 * is running rampant the host should not be exhausting the
593 * WMI queue with too many commands the only exception to
594 * this is during testing using endpointping.
595 */
596 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
597 ath6kl_err("wmi ctrl ep is full\n");
598 return action;
599 }
600
601 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
602 return action;
603
604 /*
605 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
606 * the highest active stream.
607 */
608 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
609 ar->hiac_stream_active_pri &&
610 ar->cookie_count <=
611 target->endpoint[endpoint].tx_drop_packet_threshold)
612 /*
613 * Give preference to the highest priority stream by
614 * dropping the packets which overflowed.
615 */
616 action = HTC_SEND_FULL_DROP;
617
618 /* FIXME: Locking */
619 spin_lock_bh(&ar->list_lock);
620 list_for_each_entry(vif, &ar->vif_list, list) {
621 if (vif->nw_type == ADHOC_NETWORK ||
622 action != HTC_SEND_FULL_DROP) {
623 spin_unlock_bh(&ar->list_lock);
624
625 set_bit(NETQ_STOPPED, &vif->flags);
626 netif_stop_queue(vif->ndev);
627
628 return action;
629 }
630 }
631 spin_unlock_bh(&ar->list_lock);
632
633 return action;
634}
635
636/* TODO this needs to be looked at */
637static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
638 enum htc_endpoint_id eid, u32 map_no)
639{
640 struct ath6kl *ar = vif->ar;
641 u32 i;
642
643 if (vif->nw_type != ADHOC_NETWORK)
644 return;
645
646 if (!ar->ibss_ps_enable)
647 return;
648
649 if (eid == ar->ctrl_ep)
650 return;
651
652 if (map_no == 0)
653 return;
654
655 map_no--;
656 ar->node_map[map_no].tx_pend--;
657
658 if (ar->node_map[map_no].tx_pend)
659 return;
660
661 if (map_no != (ar->node_num - 1))
662 return;
663
664 for (i = ar->node_num; i > 0; i--) {
665 if (ar->node_map[i - 1].tx_pend)
666 break;
667
668 memset(&ar->node_map[i - 1], 0,
669 sizeof(struct ath6kl_node_mapping));
670 ar->node_num--;
671 }
672}
673
674void ath6kl_tx_complete(struct htc_target *target,
675 struct list_head *packet_queue)
676{
677 struct ath6kl *ar = target->dev->ar;
678 struct sk_buff_head skb_queue;
679 struct htc_packet *packet;
680 struct sk_buff *skb;
681 struct ath6kl_cookie *ath6kl_cookie;
682 u32 map_no = 0;
683 int status;
684 enum htc_endpoint_id eid;
685 bool wake_event = false;
686 bool flushing[ATH6KL_VIF_MAX] = {false};
687 u8 if_idx;
688 struct ath6kl_vif *vif;
689
690 skb_queue_head_init(&skb_queue);
691
692 /* lock the driver as we update internal state */
693 spin_lock_bh(&ar->lock);
694
695 /* reap completed packets */
696 while (!list_empty(packet_queue)) {
697
698 packet = list_first_entry(packet_queue, struct htc_packet,
699 list);
700 list_del(&packet->list);
701
702 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
703 if (!ath6kl_cookie)
704 goto fatal;
705
706 status = packet->status;
707 skb = ath6kl_cookie->skb;
708 eid = packet->endpoint;
709 map_no = ath6kl_cookie->map_no;
710
711 if (!skb || !skb->data)
712 goto fatal;
713
714 __skb_queue_tail(&skb_queue, skb);
715
716 if (!status && (packet->act_len != skb->len))
717 goto fatal;
718
719 ar->tx_pending[eid]--;
720
721 if (eid != ar->ctrl_ep)
722 ar->total_tx_data_pend--;
723
724 if (eid == ar->ctrl_ep) {
725 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
726 clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
727
728 if (ar->tx_pending[eid] == 0)
729 wake_event = true;
730 }
731
732 if (eid == ar->ctrl_ep) {
733 if_idx = wmi_cmd_hdr_get_if_idx(
734 (struct wmi_cmd_hdr *) packet->buf);
735 } else {
736 if_idx = wmi_data_hdr_get_if_idx(
737 (struct wmi_data_hdr *) packet->buf);
738 }
739
740 vif = ath6kl_get_vif_by_index(ar, if_idx);
741 if (!vif) {
742 ath6kl_free_cookie(ar, ath6kl_cookie);
743 continue;
744 }
745
746 if (status) {
747 if (status == -ECANCELED)
748 /* a packet was flushed */
749 flushing[if_idx] = true;
750
751 vif->net_stats.tx_errors++;
752
753 if (status != -ENOSPC && status != -ECANCELED)
754 ath6kl_warn("tx complete error: %d\n", status);
755
756 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
757 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
758 __func__, skb, packet->buf, packet->act_len,
759 eid, "error!");
760 } else {
761 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
762 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
763 __func__, skb, packet->buf, packet->act_len,
764 eid, "OK");
765
766 flushing[if_idx] = false;
767 vif->net_stats.tx_packets++;
768 vif->net_stats.tx_bytes += skb->len;
769 }
770
771 ath6kl_tx_clear_node_map(vif, eid, map_no);
772
773 ath6kl_free_cookie(ar, ath6kl_cookie);
774
775 if (test_bit(NETQ_STOPPED, &vif->flags))
776 clear_bit(NETQ_STOPPED, &vif->flags);
777 }
778
779 spin_unlock_bh(&ar->lock);
780
781 __skb_queue_purge(&skb_queue);
782
783 /* FIXME: Locking */
784 spin_lock_bh(&ar->list_lock);
785 list_for_each_entry(vif, &ar->vif_list, list) {
786 if (test_bit(CONNECTED, &vif->flags) &&
787 !flushing[vif->fw_vif_idx]) {
788 spin_unlock_bh(&ar->list_lock);
789 netif_wake_queue(vif->ndev);
790 spin_lock_bh(&ar->list_lock);
791 }
792 }
793 spin_unlock_bh(&ar->list_lock);
794
795 if (wake_event)
796 wake_up(&ar->event_wq);
797
798 return;
799
800fatal:
801 WARN_ON(1);
802 spin_unlock_bh(&ar->lock);
803 return;
804}
805
806void ath6kl_tx_data_cleanup(struct ath6kl *ar)
807{
808 int i;
809
810 /* flush all the data (non-control) streams */
811 for (i = 0; i < WMM_NUM_AC; i++)
812 ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
813 ATH6KL_DATA_PKT_TAG);
814}
815
816/* Rx functions */
817
818static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
819 struct sk_buff *skb)
820{
821 if (!skb)
822 return;
823
824 skb->dev = dev;
825
826 if (!(skb->dev->flags & IFF_UP)) {
827 dev_kfree_skb(skb);
828 return;
829 }
830
831 skb->protocol = eth_type_trans(skb, skb->dev);
832
833 netif_rx_ni(skb);
834}
835
836static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
837{
838 struct sk_buff *skb;
839
840 while (num) {
841 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
842 if (!skb) {
843 ath6kl_err("netbuf allocation failed\n");
844 return;
845 }
846 skb_queue_tail(q, skb);
847 num--;
848 }
849}
850
851static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
852{
853 struct sk_buff *skb = NULL;
854
855 if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
856 (AGGR_NUM_OF_FREE_NETBUFS >> 2))
857 ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
858 AGGR_NUM_OF_FREE_NETBUFS);
859
860 skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
861
862 return skb;
863}
864
865void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
866{
867 struct ath6kl *ar = target->dev->ar;
868 struct sk_buff *skb;
869 int rx_buf;
870 int n_buf_refill;
871 struct htc_packet *packet;
872 struct list_head queue;
873
874 n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
875 ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
876
877 if (n_buf_refill <= 0)
878 return;
879
880 INIT_LIST_HEAD(&queue);
881
882 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
883 "%s: providing htc with %d buffers at eid=%d\n",
884 __func__, n_buf_refill, endpoint);
885
886 for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
887 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
888 if (!skb)
889 break;
890
891 packet = (struct htc_packet *) skb->head;
892 if (!IS_ALIGNED((unsigned long) skb->data, 4))
893 skb->data = PTR_ALIGN(skb->data - 4, 4);
894 set_htc_rxpkt_info(packet, skb, skb->data,
895 ATH6KL_BUFFER_SIZE, endpoint);
896 packet->skb = skb;
897 list_add_tail(&packet->list, &queue);
898 }
899
900 if (!list_empty(&queue))
901 ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
902}
903
904void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
905{
906 struct htc_packet *packet;
907 struct sk_buff *skb;
908
909 while (count) {
910 skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
911 if (!skb)
912 return;
913
914 packet = (struct htc_packet *) skb->head;
915 if (!IS_ALIGNED((unsigned long) skb->data, 4))
916 skb->data = PTR_ALIGN(skb->data - 4, 4);
917 set_htc_rxpkt_info(packet, skb, skb->data,
918 ATH6KL_AMSDU_BUFFER_SIZE, 0);
919 packet->skb = skb;
920
921 spin_lock_bh(&ar->lock);
922 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
923 spin_unlock_bh(&ar->lock);
924 count--;
925 }
926}
927
928/*
929 * Callback to allocate a receive buffer for a pending packet. We use a
930 * pre-allocated list of buffers of maximum AMSDU size (4K).
931 */
932struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
933 enum htc_endpoint_id endpoint,
934 int len)
935{
936 struct ath6kl *ar = target->dev->ar;
937 struct htc_packet *packet = NULL;
938 struct list_head *pkt_pos;
939 int refill_cnt = 0, depth = 0;
940
941 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
942 __func__, endpoint, len);
943
944 if ((len <= ATH6KL_BUFFER_SIZE) ||
945 (len > ATH6KL_AMSDU_BUFFER_SIZE))
946 return NULL;
947
948 spin_lock_bh(&ar->lock);
949
950 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
951 spin_unlock_bh(&ar->lock);
952 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
953 goto refill_buf;
954 }
955
956 packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
957 struct htc_packet, list);
958 list_del(&packet->list);
959 list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
960 depth++;
961
962 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
963 spin_unlock_bh(&ar->lock);
964
965 /* set actual endpoint ID */
966 packet->endpoint = endpoint;
967
968refill_buf:
969 if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
970 ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
971
972 return packet;
973}
974
975static void aggr_slice_amsdu(struct aggr_info *p_aggr,
976 struct rxtid *rxtid, struct sk_buff *skb)
977{
978 struct sk_buff *new_skb;
979 struct ethhdr *hdr;
980 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
981 u8 *framep;
982
983 mac_hdr_len = sizeof(struct ethhdr);
984 framep = skb->data + mac_hdr_len;
985 amsdu_len = skb->len - mac_hdr_len;
986
987 while (amsdu_len > mac_hdr_len) {
988 hdr = (struct ethhdr *) framep;
989 payload_8023_len = ntohs(hdr->h_proto);
990
991 if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
992 payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
993 ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
994 payload_8023_len);
995 break;
996 }
997
998 frame_8023_len = payload_8023_len + mac_hdr_len;
999 new_skb = aggr_get_free_skb(p_aggr);
1000 if (!new_skb) {
1001 ath6kl_err("no buffer available\n");
1002 break;
1003 }
1004
1005 memcpy(new_skb->data, framep, frame_8023_len);
1006 skb_put(new_skb, frame_8023_len);
1007 if (ath6kl_wmi_dot3_2_dix(new_skb)) {
1008 ath6kl_err("dot3_2_dix error\n");
1009 dev_kfree_skb(new_skb);
1010 break;
1011 }
1012
1013 skb_queue_tail(&rxtid->q, new_skb);
1014
1015 /* Is this the last subframe within this aggregate ? */
1016 if ((amsdu_len - frame_8023_len) == 0)
1017 break;
1018
1019 /* Add the length of A-MSDU subframe padding bytes -
1020 * Round to nearest word.
1021 */
1022 frame_8023_len = ALIGN(frame_8023_len, 4);
1023
1024 framep += frame_8023_len;
1025 amsdu_len -= frame_8023_len;
1026 }
1027
1028 dev_kfree_skb(skb);
1029}
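/*
 * Each A-MSDU subframe parsed above has the layout (illustrative):
 *
 *   dest[6] | src[6] | length[2] | payload (length bytes) | pad to 4 bytes
 *
 * The inner h_proto field carries the payload length rather than an
 * ethertype, which is why it is read with ntohs() and bounds-checked
 * against MIN/MAX_MSDU_SUBFRAME_PAYLOAD_LEN.
 */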
1030
1031static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1032 u16 seq_no, u8 order)
1033{
1034 struct sk_buff *skb;
1035 struct rxtid *rxtid;
1036 struct skb_hold_q *node;
1037 u16 idx, idx_end, seq_end;
1038 struct rxtid_stats *stats;
1039
1040 rxtid = &agg_conn->rx_tid[tid];
1041 stats = &agg_conn->stat[tid];
1042
1043 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1044
1045 /*
1046 * idx_end is typically the last possible frame in the window,
1047 * but changes to 'the' seq_no, when BAR comes. If seq_no
1048 * is non-zero, we will go up to that and stop.
1049 * Note: last seq no in current window will occupy the same
1050 * index position as index that is just previous to start.
1051 * An imp point : if win_sz is 7, for seq_no space of 4095,
1052 * then, there would be holes when sequence wrap around occurs.
1053 * Target should judiciously choose the win_sz, based on
1054 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
1055 * 2, 4, 8, 16 win_sz works fine).
1056 * We must deque from "idx" to "idx_end", including both.
1057 */
1058 seq_end = seq_no ? seq_no : rxtid->seq_next;
1059 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
1060
1061 spin_lock_bh(&rxtid->lock);
1062
1063 do {
1064 node = &rxtid->hold_q[idx];
1065 if ((order == 1) && (!node->skb))
1066 break;
1067
1068 if (node->skb) {
1069 if (node->is_amsdu)
1070 aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
1071 node->skb);
1072 else
1073 skb_queue_tail(&rxtid->q, node->skb);
1074 node->skb = NULL;
1075 } else
1076 stats->num_hole++;
1077
1078 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
1079 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1080 } while (idx != idx_end);
1081
1082 spin_unlock_bh(&rxtid->lock);
1083
1084 stats->num_delivered += skb_queue_len(&rxtid->q);
1085
1086 while ((skb = skb_dequeue(&rxtid->q)))
1087 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
1088}
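/*
 * Worked example for the dequeue above (illustrative numbers, assuming
 * AGGR_WIN_IDX() is a simple modulo over hold_q_sz): with win_sz 8 the
 * hold queue spans TID_WINDOW_SZ(8) slots (2 x win_sz per the comment in
 * aggr_process_recv_frm), and with seq_next 100 the in-order frames 100,
 * 101, ... are released until the first empty slot (order == 1) or all
 * the way to idx_end when flushing on a BAR or timeout (order == 0).
 */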
1089
1090static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1091 u16 seq_no,
1092 bool is_amsdu, struct sk_buff *frame)
1093{
1094 struct rxtid *rxtid;
1095 struct rxtid_stats *stats;
1096 struct sk_buff *skb;
1097 struct skb_hold_q *node;
1098 u16 idx, st, cur, end;
1099 bool is_queued = false;
1100 u16 extended_end;
1101
1102 rxtid = &agg_conn->rx_tid[tid];
1103 stats = &agg_conn->stat[tid];
1104
1105 stats->num_into_aggr++;
1106
1107 if (!rxtid->aggr) {
1108 if (is_amsdu) {
1109 aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
1110 is_queued = true;
1111 stats->num_amsdu++;
1112 while ((skb = skb_dequeue(&rxtid->q)))
1113 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
1114 skb);
1115 }
1116 return is_queued;
1117 }
1118
1119 /* Check the incoming sequence no, if it's in the window */
1120 st = rxtid->seq_next;
1121 cur = seq_no;
1122 end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
1123
1124 if (((st < end) && (cur < st || cur > end)) ||
1125 ((st > end) && (cur > end) && (cur < st))) {
1126 extended_end = (end + rxtid->hold_q_sz - 1) &
1127 ATH6KL_MAX_SEQ_NO;
1128
1129 if (((end < extended_end) &&
1130 (cur < end || cur > extended_end)) ||
1131 ((end > extended_end) && (cur > extended_end) &&
1132 (cur < end))) {
1133 aggr_deque_frms(agg_conn, tid, 0, 0);
1134 if (cur >= rxtid->hold_q_sz - 1)
1135 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
1136 else
1137 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
1138 (rxtid->hold_q_sz - 2 - cur);
1139 } else {
1140 /*
1141 * Dequeue only those frames that are outside the
1142 * new shifted window.
1143 */
1144 if (cur >= rxtid->hold_q_sz - 1)
1145 st = cur - (rxtid->hold_q_sz - 1);
1146 else
1147 st = ATH6KL_MAX_SEQ_NO -
1148 (rxtid->hold_q_sz - 2 - cur);
1149
1150 aggr_deque_frms(agg_conn, tid, st, 0);
1151 }
1152
1153 stats->num_oow++;
1154 }
1155
1156 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
1157
1158 node = &rxtid->hold_q[idx];
1159
1160 spin_lock_bh(&rxtid->lock);
1161
1162 /*
1163 * Is the cur frame duplicate or something beyond our window(hold_q
1164 * -> which is 2x, already)?
1165 *
1166 * 1. Duplicate is easy - drop incoming frame.
1167 * 2. Not falling in current sliding window.
1168 * 2a. is the frame_seq_no preceding current tid_seq_no?
1169 * -> drop the frame. perhaps sender did not get our ACK.
1170 * this is taken care of above.
1171 * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
1172 * -> Taken care of it above, by moving window forward.
1173 */
1174 dev_kfree_skb(node->skb);
1175 stats->num_dups++;
1176
1177 node->skb = frame;
1178 is_queued = true;
1179 node->is_amsdu = is_amsdu;
1180 node->seq_no = seq_no;
1181
1182 if (node->is_amsdu)
1183 stats->num_amsdu++;
1184 else
1185 stats->num_mpdu++;
1186
1187 spin_unlock_bh(&rxtid->lock);
1188
1189 aggr_deque_frms(agg_conn, tid, 0, 1);
1190
1191 if (agg_conn->timer_scheduled)
1192 rxtid->progress = true;
1193 else
1194 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1195 if (rxtid->hold_q[idx].skb) {
1196 /*
1197 * There is a frame in the queue and no
1198 * timer so start a timer to ensure that
1199 * the frame doesn't remain stuck
1200 * forever.
1201 */
1202 agg_conn->timer_scheduled = true;
1203 mod_timer(&agg_conn->timer,
1204 (jiffies +
1205 HZ * (AGGR_RX_TIMEOUT) / 1000));
1206 rxtid->progress = false;
1207 rxtid->timer_mon = true;
1208 break;
1209 }
1210 }
1211
1212 return is_queued;
1213}
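/*
 * Example of the window handling above (illustrative numbers): with
 * hold_q_sz 16 and seq_next 4090 the current window is 4090..9 (wrapping
 * at ATH6KL_MAX_SEQ_NO). An incoming seq_no of 12 falls in the extended
 * window, so only the frames outside the newly shifted window are
 * dequeued, while a sequence number beyond even the extended window
 * flushes the whole queue and resets seq_next just behind the new
 * sequence number.
 */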
1214
1215static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1216 struct ath6kl_sta *conn)
1217{
1218 struct ath6kl *ar = vif->ar;
1219 bool is_apsdq_empty, is_apsdq_empty_at_start;
1220 u32 num_frames_to_deliver, flags;
1221 struct sk_buff *skb = NULL;
1222
1223 /*
1224 * If the APSD q for this STA is not empty, dequeue and
1225 * send a pkt from the head of the q. Also update the
1226 * More data bit in the WMI_DATA_HDR if there are
1227 * more pkts for this STA in the APSD q.
1228 * If there are no more pkts for this STA,
1229 * update the APSD bitmap for this STA.
1230 */
1231
1232 num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
1233 ATH6KL_APSD_FRAME_MASK;
1234 /*
1235 * Number of frames to send in a service period is
1236 * indicated by the station
1237 * in the QOS_INFO of the association request
1238 * If it is zero, send all frames
1239 */
1240 if (!num_frames_to_deliver)
1241 num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
1242
1243 spin_lock_bh(&conn->psq_lock);
1244 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1245 spin_unlock_bh(&conn->psq_lock);
1246 is_apsdq_empty_at_start = is_apsdq_empty;
1247
1248 while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1249
1250 spin_lock_bh(&conn->psq_lock);
1251 skb = skb_dequeue(&conn->apsdq);
1252 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1253 spin_unlock_bh(&conn->psq_lock);
1254
1255 /*
1256 * Set the STA flag to Trigger delivery,
1257 * so that the frame will go out
1258 */
1259 conn->sta_flags |= STA_PS_APSD_TRIGGER;
1260 num_frames_to_deliver--;
1261
1262 /* Last frame in the service period, set EOSP or queue empty */
1263 if ((is_apsdq_empty) || (!num_frames_to_deliver))
1264 conn->sta_flags |= STA_PS_APSD_EOSP;
1265
1266 ath6kl_data_tx(skb, vif->ndev);
1267 conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
1268 conn->sta_flags &= ~(STA_PS_APSD_EOSP);
1269 }
1270
1271 if (is_apsdq_empty) {
1272 if (is_apsdq_empty_at_start)
1273 flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
1274 else
1275 flags = 0;
1276
1277 ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
1278 vif->fw_vif_idx,
1279 conn->aid, 0, flags);
1280 }
1281
1282 return;
1283}
1284
1285void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1286{
1287 struct ath6kl *ar = target->dev->ar;
1288 struct sk_buff *skb = packet->pkt_cntxt;
1289 struct wmi_rx_meta_v2 *meta;
1290 struct wmi_data_hdr *dhdr;
1291 int min_hdr_len;
1292 u8 meta_type, dot11_hdr = 0;
1293 u8 pad_before_data_start;
1294 int status = packet->status;
1295 enum htc_endpoint_id ept = packet->endpoint;
1296 bool is_amsdu, prev_ps, ps_state = false;
1297 bool trig_state = false;
1298 struct ath6kl_sta *conn = NULL;
1299 struct sk_buff *skb1 = NULL;
1300 struct ethhdr *datap = NULL;
1301 struct ath6kl_vif *vif;
1302 struct aggr_info_conn *aggr_conn;
1303 u16 seq_no, offset;
1304 u8 tid, if_idx;
1305
1306 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1307 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1308 __func__, ar, ept, skb, packet->buf,
1309 packet->act_len, status);
1310
1311 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1312 dev_kfree_skb(skb);
1313 return;
1314 }
1315
1316 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1317 skb_pull(skb, HTC_HDR_LENGTH);
1318
1319 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1320 skb->data, skb->len);
1321
1322 if (ept == ar->ctrl_ep) {
1323 if (test_bit(WMI_ENABLED, &ar->flag)) {
1324 ath6kl_check_wow_status(ar);
1325 ath6kl_wmi_control_rx(ar->wmi, skb);
1326 return;
1327 }
1328 if_idx =
1329 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1330 } else {
1331 if_idx =
1332 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1333 }
1334
1335 vif = ath6kl_get_vif_by_index(ar, if_idx);
1336 if (!vif) {
1337 dev_kfree_skb(skb);
1338 return;
1339 }
1340
1341 /*
1342 * Take lock to protect buffer counts and adaptive power throughput
1343 * state.
1344 */
1345 spin_lock_bh(&vif->if_lock);
1346
1347 vif->net_stats.rx_packets++;
1348 vif->net_stats.rx_bytes += packet->act_len;
1349
1350 spin_unlock_bh(&vif->if_lock);
1351
1352 skb->dev = vif->ndev;
1353
1354 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1355 if (EPPING_ALIGNMENT_PAD > 0)
1356 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1357 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1358 return;
1359 }
1360
1361 ath6kl_check_wow_status(ar);
1362
1363 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1364 sizeof(struct ath6kl_llc_snap_hdr);
1365
1366 dhdr = (struct wmi_data_hdr *) skb->data;
1367
1368 /*
1369 * In the case of AP mode we may receive NULL data frames
1370 * that do not have LLC hdr. They are 16 bytes in size.
1371 * Allow these frames in the AP mode.
1372 */
1373 if (vif->nw_type != AP_NETWORK &&
1374 ((packet->act_len < min_hdr_len) ||
1375 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1376 ath6kl_info("frame len is too short or too long\n");
1377 vif->net_stats.rx_errors++;
1378 vif->net_stats.rx_length_errors++;
1379 dev_kfree_skb(skb);
1380 return;
1381 }
1382
1383 /* Get the Power save state of the STA */
1384 if (vif->nw_type == AP_NETWORK) {
1385 meta_type = wmi_data_hdr_get_meta(dhdr);
1386
1387 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1388 WMI_DATA_HDR_PS_MASK);
1389
1390 offset = sizeof(struct wmi_data_hdr);
1391 trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
1392
1393 switch (meta_type) {
1394 case 0:
1395 break;
1396 case WMI_META_VERSION_1:
1397 offset += sizeof(struct wmi_rx_meta_v1);
1398 break;
1399 case WMI_META_VERSION_2:
1400 offset += sizeof(struct wmi_rx_meta_v2);
1401 break;
1402 default:
1403 break;
1404 }
1405
1406 datap = (struct ethhdr *) (skb->data + offset);
1407 conn = ath6kl_find_sta(vif, datap->h_source);
1408
1409 if (!conn) {
1410 dev_kfree_skb(skb);
1411 return;
1412 }
1413
1414 /*
1415 * If there is a change in PS state of the STA,
1416 * take appropriate steps:
1417 *
1418 * 1. If Sleep-->Awake, flush the psq for the STA
1419 * Clear the PVB for the STA.
1420 * 2. If Awake-->Sleep, Starting queueing frames
1421 * the STA.
1422 */
1423 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1424
1425 if (ps_state)
1426 conn->sta_flags |= STA_PS_SLEEP;
1427 else
1428 conn->sta_flags &= ~STA_PS_SLEEP;
1429
1430 /* Accept trigger only when the station is in sleep */
1431 if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1432 ath6kl_uapsd_trigger_frame_rx(vif, conn);
1433
1434 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1435 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1436 struct sk_buff *skbuff = NULL;
1437 bool is_apsdq_empty;
1438 struct ath6kl_mgmt_buff *mgmt;
1439 u8 idx;
1440
1441 spin_lock_bh(&conn->psq_lock);
1442 while (conn->mgmt_psq_len > 0) {
1443 mgmt = list_first_entry(
1444 &conn->mgmt_psq,
1445 struct ath6kl_mgmt_buff,
1446 list);
1447 list_del(&mgmt->list);
1448 conn->mgmt_psq_len--;
1449 spin_unlock_bh(&conn->psq_lock);
1450 idx = vif->fw_vif_idx;
1451
1452 ath6kl_wmi_send_mgmt_cmd(ar->wmi,
1453 idx,
1454 mgmt->id,
1455 mgmt->freq,
1456 mgmt->wait,
1457 mgmt->buf,
1458 mgmt->len,
1459 mgmt->no_cck);
1460
1461 kfree(mgmt);
1462 spin_lock_bh(&conn->psq_lock);
1463 }
1464 conn->mgmt_psq_len = 0;
1465 while ((skbuff = skb_dequeue(&conn->psq))) {
1466 spin_unlock_bh(&conn->psq_lock);
1467 ath6kl_data_tx(skbuff, vif->ndev);
1468 spin_lock_bh(&conn->psq_lock);
1469 }
1470
1471 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1472 while ((skbuff = skb_dequeue(&conn->apsdq))) {
1473 spin_unlock_bh(&conn->psq_lock);
1474 ath6kl_data_tx(skbuff, vif->ndev);
1475 spin_lock_bh(&conn->psq_lock);
1476 }
1477 spin_unlock_bh(&conn->psq_lock);
1478
1479 if (!is_apsdq_empty)
1480 ath6kl_wmi_set_apsd_bfrd_traf(
1481 ar->wmi,
1482 vif->fw_vif_idx,
1483 conn->aid, 0, 0);
1484
1485 /* Clear the PVB for this STA */
1486 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1487 conn->aid, 0);
1488 }
1489 }
1490
1491 /* drop NULL data frames here */
1492 if ((packet->act_len < min_hdr_len) ||
1493 (packet->act_len >
1494 WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1495 dev_kfree_skb(skb);
1496 return;
1497 }
1498 }
1499
1500 is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1501 tid = wmi_data_hdr_get_up(dhdr);
1502 seq_no = wmi_data_hdr_get_seqno(dhdr);
1503 meta_type = wmi_data_hdr_get_meta(dhdr);
1504 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1505 pad_before_data_start =
1506 (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
1507 & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
1508
1509 skb_pull(skb, sizeof(struct wmi_data_hdr));
1510
1511 switch (meta_type) {
1512 case WMI_META_VERSION_1:
1513 skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1514 break;
1515 case WMI_META_VERSION_2:
1516 meta = (struct wmi_rx_meta_v2 *) skb->data;
1517 if (meta->csum_flags & 0x1) {
1518 skb->ip_summed = CHECKSUM_COMPLETE;
1519 skb->csum = (__force __wsum) meta->csum;
1520 }
1521 skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1522 break;
1523 default:
1524 break;
1525 }
1526
1527 skb_pull(skb, pad_before_data_start);
1528
1529 if (dot11_hdr)
1530 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1531 else if (!is_amsdu)
1532 status = ath6kl_wmi_dot3_2_dix(skb);
1533
1534 if (status) {
1535 /*
1536 * Drop frames that could not be processed (lack of
1537 * memory, etc.)
1538 */
1539 dev_kfree_skb(skb);
1540 return;
1541 }
1542
1543 if (!(vif->ndev->flags & IFF_UP)) {
1544 dev_kfree_skb(skb);
1545 return;
1546 }
1547
1548 if (vif->nw_type == AP_NETWORK) {
1549 datap = (struct ethhdr *) skb->data;
1550 if (is_multicast_ether_addr(datap->h_dest))
1551 /*
1552 * Bcast/Mcast frames should be sent to the
1553 * OS stack as well as on the air.
1554 */
1555 skb1 = skb_copy(skb, GFP_ATOMIC);
1556 else {
1557 /*
1558 * Search for a connected STA with dstMac
1559 * as the Mac address. If found send the
1560 * frame to it on the air else send the
1561 * frame up the stack.
1562 */
1563 conn = ath6kl_find_sta(vif, datap->h_dest);
1564
1565 if (conn && ar->intra_bss) {
1566 skb1 = skb;
1567 skb = NULL;
1568 } else if (conn && !ar->intra_bss) {
1569 dev_kfree_skb(skb);
1570 skb = NULL;
1571 }
1572 }
1573 if (skb1)
1574 ath6kl_data_tx(skb1, vif->ndev);
1575
1576 if (skb == NULL) {
1577 /* nothing to deliver up the stack */
1578 return;
1579 }
1580 }
1581
1582 datap = (struct ethhdr *) skb->data;
1583
1584 if (is_unicast_ether_addr(datap->h_dest)) {
1585 if (vif->nw_type == AP_NETWORK) {
1586 conn = ath6kl_find_sta(vif, datap->h_source);
1587 if (!conn)
1588 return;
1589 aggr_conn = conn->aggr_conn;
1590 } else
1591 aggr_conn = vif->aggr_cntxt->aggr_conn;
1592
1593 if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1594 is_amsdu, skb)) {
1595 /* aggregation code will handle the skb */
1596 return;
1597 }
1598 } else if (!is_broadcast_ether_addr(datap->h_dest))
1599 vif->net_stats.multicast++;
1600
1601 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1602}
1603
1604static void aggr_timeout(unsigned long arg)
1605{
1606 u8 i, j;
1607 struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
1608 struct rxtid *rxtid;
1609 struct rxtid_stats *stats;
1610
1611 for (i = 0; i < NUM_OF_TIDS; i++) {
1612 rxtid = &aggr_conn->rx_tid[i];
1613 stats = &aggr_conn->stat[i];
1614
1615 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1616 continue;
1617
1618 stats->num_timeouts++;
1619 ath6kl_dbg(ATH6KL_DBG_AGGR,
1620 "aggr timeout (st %d end %d)\n",
1621 rxtid->seq_next,
1622 ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1623 ATH6KL_MAX_SEQ_NO));
1624 aggr_deque_frms(aggr_conn, i, 0, 0);
1625 }
1626
1627 aggr_conn->timer_scheduled = false;
1628
1629 for (i = 0; i < NUM_OF_TIDS; i++) {
1630 rxtid = &aggr_conn->rx_tid[i];
1631
1632 if (rxtid->aggr && rxtid->hold_q) {
1633 for (j = 0; j < rxtid->hold_q_sz; j++) {
1634 if (rxtid->hold_q[j].skb) {
1635 aggr_conn->timer_scheduled = true;
1636 rxtid->timer_mon = true;
1637 rxtid->progress = false;
1638 break;
1639 }
1640 }
1641
1642 if (j >= rxtid->hold_q_sz)
1643 rxtid->timer_mon = false;
1644 }
1645 }
1646
1647 if (aggr_conn->timer_scheduled)
1648 mod_timer(&aggr_conn->timer,
1649 jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1650}
1651
1652static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1653{
1654 struct rxtid *rxtid;
1655 struct rxtid_stats *stats;
1656
1657 if (!aggr_conn || tid >= NUM_OF_TIDS)
1658 return;
1659
1660 rxtid = &aggr_conn->rx_tid[tid];
1661 stats = &aggr_conn->stat[tid];
1662
1663 if (rxtid->aggr)
1664 aggr_deque_frms(aggr_conn, tid, 0, 0);
1665
1666 rxtid->aggr = false;
1667 rxtid->progress = false;
1668 rxtid->timer_mon = false;
1669 rxtid->win_sz = 0;
1670 rxtid->seq_next = 0;
1671 rxtid->hold_q_sz = 0;
1672
1673 kfree(rxtid->hold_q);
1674 rxtid->hold_q = NULL;
1675
1676 memset(stats, 0, sizeof(struct rxtid_stats));
1677}
1678
1679void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1680 u8 win_sz)
1681{
1682 struct ath6kl_sta *sta;
1683 struct aggr_info_conn *aggr_conn = NULL;
1684 struct rxtid *rxtid;
1685 struct rxtid_stats *stats;
1686 u16 hold_q_size;
1687 u8 tid, aid;
1688
1689 if (vif->nw_type == AP_NETWORK) {
1690 aid = ath6kl_get_aid(tid_mux);
1691 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1692 if (sta)
1693 aggr_conn = sta->aggr_conn;
1694 } else
1695 aggr_conn = vif->aggr_cntxt->aggr_conn;
1696
1697 if (!aggr_conn)
1698 return;
1699
1700 tid = ath6kl_get_tid(tid_mux);
1701 if (tid >= NUM_OF_TIDS)
1702 return;
1703
1704 rxtid = &aggr_conn->rx_tid[tid];
1705 stats = &aggr_conn->stat[tid];
1706
1707 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1708 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1709 __func__, win_sz, tid);
1710
1711 if (rxtid->aggr)
1712 aggr_delete_tid_state(aggr_conn, tid);
1713
1714 rxtid->seq_next = seq_no;
1715 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1716 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1717 if (!rxtid->hold_q)
1718 return;
1719
1720 rxtid->win_sz = win_sz;
1721 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1722 if (!skb_queue_empty(&rxtid->q))
1723 return;
1724
1725 rxtid->aggr = true;
1726}
1727
1728void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1729 struct aggr_info_conn *aggr_conn)
1730{
1731 struct rxtid *rxtid;
1732 u8 i;
1733
1734 aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
1735 aggr_conn->dev = vif->ndev;
1736 init_timer(&aggr_conn->timer);
1737 aggr_conn->timer.function = aggr_timeout;
1738 aggr_conn->timer.data = (unsigned long) aggr_conn;
1739 aggr_conn->aggr_info = aggr_info;
1740
1741 aggr_conn->timer_scheduled = false;
1742
1743 for (i = 0; i < NUM_OF_TIDS; i++) {
1744 rxtid = &aggr_conn->rx_tid[i];
1745 rxtid->aggr = false;
1746 rxtid->progress = false;
1747 rxtid->timer_mon = false;
1748 skb_queue_head_init(&rxtid->q);
1749 spin_lock_init(&rxtid->lock);
1750 }
1751
1752}
1753
1754struct aggr_info *aggr_init(struct ath6kl_vif *vif)
1755{
1756 struct aggr_info *p_aggr = NULL;
1757
1758 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1759 if (!p_aggr) {
1760 ath6kl_err("failed to alloc memory for aggr_node\n");
1761 return NULL;
1762 }
1763
1764 p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
1765 if (!p_aggr->aggr_conn) {
1766 ath6kl_err("failed to alloc memory for connection specific aggr info\n");
1767 kfree(p_aggr);
1768 return NULL;
1769 }
1770
1771 aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
1772
1773 skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
1774 ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
1775
1776 return p_aggr;
1777}
1778
1779void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1780{
1781 struct ath6kl_sta *sta;
1782 struct rxtid *rxtid;
1783 struct aggr_info_conn *aggr_conn = NULL;
1784 u8 tid, aid;
1785
1786 if (vif->nw_type == AP_NETWORK) {
1787 aid = ath6kl_get_aid(tid_mux);
1788 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1789 if (sta)
1790 aggr_conn = sta->aggr_conn;
1791 } else
1792 aggr_conn = vif->aggr_cntxt->aggr_conn;
1793
1794 if (!aggr_conn)
1795 return;
1796
1797 tid = ath6kl_get_tid(tid_mux);
1798 if (tid >= NUM_OF_TIDS)
1799 return;
1800
1801 rxtid = &aggr_conn->rx_tid[tid];
1802
1803 if (rxtid->aggr)
1804 aggr_delete_tid_state(aggr_conn, tid);
1805}
1806
1807void aggr_reset_state(struct aggr_info_conn *aggr_conn)
1808{
1809 u8 tid;
1810
1811 if (!aggr_conn)
1812 return;
1813
1814 if (aggr_conn->timer_scheduled) {
1815 del_timer(&aggr_conn->timer);
1816 aggr_conn->timer_scheduled = false;
1817 }
1818
1819 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1820 aggr_delete_tid_state(aggr_conn, tid);
1821}
1822
1823/* clean up our amsdu buffer list */
1824void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1825{
1826 struct htc_packet *packet, *tmp_pkt;
1827
1828 spin_lock_bh(&ar->lock);
1829 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1830 spin_unlock_bh(&ar->lock);
1831 return;
1832 }
1833
1834 list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1835 list) {
1836 list_del(&packet->list);
1837 spin_unlock_bh(&ar->lock);
1838 dev_kfree_skb(packet->pkt_cntxt);
1839 spin_lock_bh(&ar->lock);
1840 }
1841
1842 spin_unlock_bh(&ar->lock);
1843}
1844
1845void aggr_module_destroy(struct aggr_info *aggr_info)
1846{
1847 if (!aggr_info)
1848 return;
1849
1850 aggr_reset_state(aggr_info->aggr_conn);
1851 skb_queue_purge(&aggr_info->rx_amsdu_freeq);
1852 kfree(aggr_info->aggr_conn);
1853 kfree(aggr_info);
1854}