/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "debug.h"

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
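
/*
 * Illustrative walk-through of the mapping above (not driver code): with
 * data endpoints ENDPOINT_2..ENDPOINT_5 and five active IBSS peers, the
 * first four peers each claim an endpoint via the !ar->tx_pending[i] test.
 * The fifth finds no free endpoint at i == ENDPOINT_5, so it takes
 * ar->next_ep_id, and that counter round-robins through
 * ENDPOINT_2..ENDPOINT_5, wrapping back to ENDPOINT_2. The map_no handed
 * back to the caller is ep_map + 1, so 0 can safely mean "no mapping" in
 * ath6kl_tx_clear_node_map().
 */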
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(ar, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}
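
/*
 * Summary of the return value above (illustrative note): true means the
 * skb was consumed here (queued to ar->mcastpsq or conn->psq, or dropped
 * because no STA matched), so ath6kl_data_tx() must not send it; false
 * means the caller transmits it, possibly with *more_data set so the
 * MoreData bit can be flagged for a DTIM or PS-Poll delivery.
 */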
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
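
/*
 * Usage note (illustrative): the WMI layer is the expected caller here,
 * handing fully built command skbs down on ar->ctrl_ep. Because the send
 * is asynchronous, a zero return only means the packet was queued to HTC;
 * success or failure is reported later through ath6kl_tx_complete().
 */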
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &ar->flag)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (ar->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(ar, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if (skb_headroom(skb) < dev->needed_headroom) {
			WARN_ON(1);
			goto fail_tx;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((ar->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag)) {
			chk_adhoc_ps_mapping = true;
		} else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since
		 * we are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	ar->net_stats.tx_dropped++;
	ar->net_stats.tx_aborted_errors++;

	return 0;
}
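
/*
 * Worked example for the alignment test above (illustrative, assuming
 * HTC_HDR_LENGTH is 6 bytes): skb->data at 0x1002 gives 0x1002 - 6 =
 * 0xffc, which IS_ALIGNED(..., 4) accepts, so the HTC header can be
 * pushed in place. skb->data at 0x1003 gives 0xffd and fails the test;
 * if that skb is also cloned, skb_copy_expand() re-allocates it with
 * HTC_HDR_LENGTH of headroom so ath6kl_htc_tx_buf_align() needn't
 * memmove later.
 */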
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}
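
/*
 * Illustrative example of the bookkeeping above (assuming the usual WMM
 * priority ordering BK < BE < VI < VO in ar->ac_stream_pri_map):
 * activating VI raises hiac_stream_active_pri to VI's priority. If VI
 * then goes inactive while BE is still active, the reset-and-rescan loop
 * settles hiac_stream_active_pri back down to BE's priority.
 */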
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	enum htc_endpoint_id endpoint = packet->endpoint;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		return HTC_SEND_FULL_KEEP;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return HTC_SEND_FULL_KEEP;

	if (ar->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue, however we
		 * should stop the network.
		 */
		goto stop_net_queues;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		return HTC_SEND_FULL_DROP;

stop_net_queues:
	spin_lock_bh(&ar->lock);
	set_bit(NETQ_STOPPED, &ar->flag);
	spin_unlock_bh(&ar->lock);
	netif_stop_queue(ar->net_dev);

	return HTC_SEND_FULL_KEEP;
}
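
/*
 * Illustrative scenario for the cookie reservation above: if VO is the
 * highest active stream and a BE endpoint reports full while
 * ar->cookie_count has fallen to MAX_HI_COOKIE_NUM or below, the BE
 * packet is dropped (HTC_SEND_FULL_DROP) so the remaining cookies stay
 * available for VO; otherwise the net queue is stopped and the packet
 * kept.
 */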
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
				     enum htc_endpoint_id eid, u32 map_no)
{
	u32 i;

	if (ar->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing = false;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		packet->buf = skb->data;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing = true;

			ar->net_stats.tx_errors++;

			if (status != -ENOSPC)
				ath6kl_err("tx error, status: 0x%x\n", status);
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing = false;
			ar->net_stats.tx_packets++;
			ar->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(ar, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &ar->flag))
			clear_bit(NETQ_STOPPED, &ar->flag);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	if (test_bit(CONNECTED, &ar->flag)) {
		if (!flushing)
			netif_wake_queue(ar->net_dev);
	}

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}
static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
		       ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
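
/*
 * Worked example of the realignment above (illustrative): if skb->data
 * sits at 0x1006, PTR_ALIGN(0x1006 - 4, 4) = PTR_ALIGN(0x1002, 4) =
 * 0x1004. Since this branch only runs when skb->data is unaligned, the
 * buffer start is pulled back by 1-3 bytes to the previous word boundary,
 * giving a 4-byte-aligned DMA target at the cost of a little headroom.
 */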
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
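
/*
 * Illustrative numbers for the refill policy above: if the pool is sized
 * at ATH6KL_MAX_AMSDU_RX_BUFFERS entries and this callback leaves it
 * depth entries deep, the deficit is refill_cnt = max - depth. Buffers
 * are only batch-allocated once that deficit reaches
 * ATH6KL_AMSDU_REFILL_THRESHOLD, amortizing allocation cost over several
 * A-MSDU receives instead of refilling one buffer at a time.
 */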
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
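
/*
 * Layout example for the slicing loop above (illustrative): a 200-byte
 * A-MSDU after its outer 14-byte ethernet header leaves amsdu_len = 186.
 * A first subframe with h_proto = 86 gives frame_8023_len = 86 + 14 =
 * 100; ALIGN(100, 4) = 100, so framep advances 100 bytes and amsdu_len
 * drops to 86 for the next subframe.
 */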
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the BAR's seq_no when a BAR arrives. If seq_no
	 * is non-zero, we go up to that sequence number and stop.
	 * Note: the last seq no in the current window occupies the same
	 * index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there would be holes when the sequence number wraps around.
	 * The target should choose win_sz judiciously with this in mind;
	 * for 4095, win_sz of 2, 4, 8 or 16 (TID_WINDOW_SZ = 2 x win_sz)
	 * works fine.
	 * We must dequeue from "idx" to "idx_end", both inclusive.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}
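
/*
 * Worked example (illustrative, assuming AGGR_WIN_IDX reduces a sequence
 * number modulo hold_q_sz): with win_sz = 8, hold_q_sz = TID_WINDOW_SZ(8)
 * = 16. If seq_next = 20 and a BAR asks for seq_no = 25, then
 * idx = 20 % 16 = 4 and idx_end = 25 % 16 = 9, so slots 4..8 are flushed
 * in order and seq_next advances to 25. With seq_no = 0 the loop walks
 * the whole window back around to its own index, releasing every held
 * frame.
 */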
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	u16 extended_end;
	bool is_queued = false;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window (hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> taken care of above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
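
/*
 * Worked example of the in-window test above (illustrative, assuming
 * ATH6KL_MAX_SEQ_NO = 4095 and hold_q_sz = 16): with seq_next = st =
 * 4090, end = (4090 + 15) & 4095 = 9, so the window has wrapped
 * (st > end). Incoming cur = 2 fails the (cur > end) && (cur < st)
 * violation, i.e. it is inside the window and is simply slotted into
 * hold_q. cur = 100 is outside, and also beyond extended_end =
 * (9 + 15) & 4095 = 24, so the window is force-flushed and seq_next
 * re-based to cur - (hold_q_sz - 1) = 85.
 */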
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	u16 seq_no, offset;
	u8 tid;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		ar->net_stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&ar->lock);

	ar->net_stats.rx_packets++;
	ar->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&ar->lock);

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	skb->dev = ar->net_dev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
		return;
	}

	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (ar->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		ar->net_stats.rx_errors++;
		ar->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (ar->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(ar, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, ar->net_dev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(ar->net_dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (ar->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			struct ath6kl_sta *conn = NULL;

			conn = ath6kl_find_sta(ar, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, ar->net_dev);

		/* the frame was consumed above; nothing left to deliver */
		if (skb == NULL)
			return;
	}
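
	/*
	 * Illustrative AP-mode outcomes of the block above: a multicast
	 * frame is duplicated (skb_copy) so one copy goes up the stack
	 * and one back on the air; a frame for a connected STA is re-sent
	 * on air only when intra-BSS forwarding is enabled, or dropped
	 * when it is disabled. In both unicast cases skb is NULL
	 * afterwards, which is why the guard above returns early instead
	 * of dereferencing it.
	 */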
	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
}
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
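
/*
 * Sizing example (illustrative): an ADDBA with win_sz = 8 allocates
 * TID_WINDOW_SZ(8) = 16 skb_hold_q slots - the 2x window noted in
 * aggr_deque_frms() lets out-of-order frames up to one full window ahead
 * be parked rather than dropped - and seq_next starts at the ADDBA's
 * starting sequence number.
 */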
struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}
void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
{
	struct aggr_info *p_aggr = ar->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}
void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}
/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}