/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Implementation of receive path.
 */
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds = bf->bf_desc;
	struct sk_buff *skb = bf->bf_mpdu;

	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	ds->ds_vdata = skb->data;

	/* setup rx descriptors. The sc_rxbufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process it. */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->sc_rxbufsize,
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
}
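
/*
 * Note on the block-ack window helpers used throughout this file:
 * INCR() advances a re-order buffer index as a circular counter
 * (modulo ATH_TID_MAX_BUFS for buffer indices, modulo IEEE80211_SEQ_MAX
 * for sequence numbers), and ATH_BA_INDEX(start, seq) gives the offset
 * of 'seq' relative to 'start' within the 802.11 sequence-number space.
 * Both are defined in the driver headers; the description here is only
 * a summary of how the receive path below relies on them.
 */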
/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
		      struct ath_node *an,
		      struct sk_buff *skb)
{
	struct ieee80211_bar *bar;
	struct ath_arx_tid *rxtid;
	struct sk_buff *tskb;
	struct ath_recv_status *rx_status;
	int tidno, index, cindex;
	u16 seqno;

	/* look at BAR contents */
	bar = (struct ieee80211_bar *)skb->data;
	tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
		>> IEEE80211_BAR_CTL_TID_S;
	seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

	/* process BAR - indicate all pending RX frames till the BAR seqno */
	rxtid = &an->an_aggr.rx.tid[tidno];

	spin_lock_bh(&rxtid->tidlock);

	/* get relative index */
	index = ATH_BA_INDEX(rxtid->seq_next, seqno);

	/* drop BAR if old sequence (index is too large) */
	if ((index > rxtid->baw_size) &&
	    (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
		/* discard frame, ieee layer may not treat frame as a dup */
		goto unlock_and_free;

	/* complete receive processing for all pending frames up to BAR seqno */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	while ((rxtid->baw_head != rxtid->baw_tail) &&
	       (rxtid->baw_head != cindex)) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		if (tskb != NULL)
			ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* ... and indicate rest of the frames in-order */
	while (rxtid->baw_head != rxtid->baw_tail &&
	       rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

unlock_and_free:
	spin_unlock_bh(&rxtid->tidlock);
	/* free bar itself */
	dev_kfree_skb(skb);
	return IEEE80211_FTYPE_CTL;
}
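
/*
 * Worked example for the BAR handling above (illustrative numbers only):
 * if rxtid->seq_next is 100 and the BAR carries a start sequence of 105,
 * ATH_BA_INDEX() yields 5, so the first loop releases the five oldest
 * window slots (holes are skipped but still advance seq_next), leaving
 * seq_next at 105 before the in-order flush in the second loop runs.
 */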
/* Function to handle a subframe of aggregation when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
			   struct ath_node *an,
			   struct sk_buff *skb,
			   struct ath_recv_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	struct ath_arx_tid *rxtid;
	struct ath_rxbuf *rxbuf;
	u8 type, subtype;
	__le16 fc;
	u8 *qc;
	int tid = 0, index, cindex, rxdiff;
	u16 rxseq;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* collect stats of frames with non-zero version */
	if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
		dev_kfree_skb(skb);
		return -1;
	}

	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
	subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

	if (ieee80211_is_back_req(fc))
		return ath_bar_rx(sc, an, skb);

	/* special aggregate processing only for qos unicast data frames */
	if (!ieee80211_is_data(fc) ||
	    !ieee80211_is_data_qos(fc) ||
	    is_multicast_ether_addr(hdr->addr1))
		return ath_rx_subframe(an, skb, rx_status);

	/* lookup rx tid state */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
		/* Drop the frame not belonging to me. */
		if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
			dev_kfree_skb(skb);
			return -1;
		}
	}

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock(&rxtid->tidlock);

	rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
		(ATH_TID_MAX_BUFS - 1);

	/*
	 * If the ADDBA exchange has not been completed by the source,
	 * process via legacy path (i.e. no reordering buffer is needed)
	 */
	if (!rxtid->addba_exchangecomplete) {
		spin_unlock(&rxtid->tidlock);
		return ath_rx_subframe(an, skb, rx_status);
	}

	/* extract sequence number from recvd frame */
	rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

	if (rxtid->seq_reset) {
		rxtid->seq_reset = 0;
		rxtid->seq_next = rxseq;
	}

	index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

	/* drop frame if old sequence (index is too large) */
	if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
		/* discard frame, ieee layer may not treat frame as a dup */
		spin_unlock(&rxtid->tidlock);
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	/* sequence number is beyond block-ack window */
	if (index >= rxtid->baw_size) {

		/* complete receive processing for all pending frames */
		while (index >= rxtid->baw_size) {

			rxbuf = rxtid->rxbuf + rxtid->baw_head;

			if (rxbuf->rx_wbuf != NULL) {
				ath_rx_subframe(an, rxbuf->rx_wbuf,
						&rxbuf->rx_status);
				rxbuf->rx_wbuf = NULL;
			}

			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);

			index--;
		}
	}

	/* add buffer to the recv ba window */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	rxbuf = rxtid->rxbuf + cindex;

	if (rxbuf->rx_wbuf != NULL) {
		spin_unlock(&rxtid->tidlock);
		/* duplicate frame */
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	rxbuf->rx_wbuf = skb;
	rxbuf->rx_time = get_timestamp();
	rxbuf->rx_status = *rx_status;

	/* advance tail if sequence received is newer
	 * than any received so far */
	if (index >= rxdiff) {
		rxtid->baw_tail = cindex;
		INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
	}

	/* indicate all in-order received frames */
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf)
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
	else
		del_timer_sync(&rxtid->timer);

	spin_unlock(&rxtid->tidlock);
	return IEEE80211_FTYPE_DATA;
}
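
/*
 * Illustration of the out-of-window case above (hypothetical numbers):
 * with seq_next = 200 and baw_size = 64, a sub-frame with sequence 270
 * gives index = 70.  The flush loop then indicates the oldest entries one
 * by one, sliding seq_next forward until the new frame fits inside the
 * window, after which it is stored at cindex and any frames that are now
 * contiguous at the head of the window are delivered in order.
 */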
/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
	struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
	struct ath_node *an = rxtid->an;
	struct ath_rxbuf *rxbuf;
	int nosched;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		/*
		 * Stop if the next one is a very recent frame.
		 *
		 * Call get_timestamp in every iteration to protect against the
		 * case in which a new frame is received while we are executing
		 * this function. Using a timestamp obtained before entering
		 * the loop could lead to a very large time interval
		 * (a negative value typecast to unsigned), breaking the
		 * function's logic.
		 */
		if ((get_timestamp() - rxbuf->rx_time) <
			(ATH_RX_TIMEOUT * HZ / 1000))
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf,
				&rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
	else
		nosched = 1;	/* no need to re-arm the timer again */

	spin_unlock_bh(&rxtid->tidlock);
}
/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
			     struct ath_arx_tid *rxtid, int drop)
{
	struct ath_rxbuf *rxbuf;
	unsigned long flag;

	spin_lock_irqsave(&rxtid->tidlock, flag);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		if (drop)
			dev_kfree_skb(rxbuf->rx_wbuf);
		else
			ath_rx_subframe(rxtid->an,
					rxbuf->rx_wbuf,
					&rxbuf->rx_status);

		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}
	spin_unlock_irqrestore(&rxtid->tidlock, flag);
}
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
				       u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	/* Note: the kernel can allocate a value greater than
	 * what we ask it to give us. We really only need 4 KB as that
	 * is what this hardware supports and in fact we need at least 3849
	 * as that is the MAX AMSDU size this hardware supports.
	 * Unfortunately this means we may get 8 KB here from the
	 * kernel... and that is actually what is observed on some
	 * systems :( */
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}
static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}
/*
 * The skb indicated to upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = _ath_rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
					 sc->sc_rxbufsize,
					 PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, setup
		 * to handle the largest one possible.
		 */
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					 sc->sc_rxbufsize,
					 PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}
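
/*
 * Note: sc_rxbufsize is IEEE80211_MAX_MPDU_LEN rounded up to a multiple
 * of the cache line size (capped at 64 bytes), so every pre-allocated skb
 * above is both large enough for the biggest MPDU and a whole number of
 * cache lines long.  Each skb's data area is then mapped once with
 * pci_map_single() for device-to-host DMA; the mapping is torn down in
 * ath_rx_tasklet() with pci_unmap_single() before the frame is indicated.
 */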
/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
	   & beacon frames */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;

#undef RX_FILTER_PRESERVE
}
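
/*
 * Example of the resulting filter (derived from the logic above): a plain
 * station (ATH9K_M_STA) with no promiscuous flags set ends up with the
 * preserved PHY-error bits plus UCAST | BCAST | MCAST, gains BEACON only
 * while FIF_BCN_PRBRESP_PROMISC is set, and never gets PROBEREQ or PSPOLL.
 */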
/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */

	return 0;
}
/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);		/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);		/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);				/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;			/* just in case */

	return stopped;
}
/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize the access of rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush)
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;

	ath_rx_tasklet(sc, 1);

	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}
/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
		 struct ath_node *an,
		 int is_ampdu,
		 struct sk_buff *skb,
		 struct ath_recv_status *rx_status,
		 enum ATH_RX_TYPE *status)
{
	if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
		*status = ATH_RX_CONSUMED;
		return ath_ampdu_input(sc, an, skb, rx_status);
	} else {
		*status = ATH_RX_NON_CONSUMED;
		return -1;
	}
}
/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition that BH gets scheduled after sw
		 * writes RxE and before hw re-loads the last descriptor to get
		 * the newly chained one. Software must keep the last DONE
		 * descriptor as a holding descriptor - software does so by
		 * marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
						     tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {		/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}
		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = NULL;
		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);
		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto rx_next;

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memset(&rx_status, 0, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
				goto rx_next;
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push the frame up to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}

			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}
		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;

		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}

			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) {
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
				rx_status.flags |= ATH_RX_SHORT_GI;
			}
		}

		/* sc_noise_floor is only available when the station
		 * attaches to an AP, so we use a default value
		 * if we are not yet attached. */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    sc->sc_rxbufsize,
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);
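
		/*
		 * The HT rate arithmetic above, with illustrative numbers:
		 * a 65000 Kbps base rate reported for a 40 MHz frame becomes
		 * 65000 * 27 / 13 = 135000 Kbps, and with a short guard
		 * interval it is further scaled by 10/9 to 150000 Kbps.
		 * The 27/13 and 10/9 factors approximate the 40 MHz and
		 * short-GI throughput gains over 20 MHz / long-GI operation.
		 */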
		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}
		/* Pass frames up to the stack. */

		type = ath_rx_indicate(sc, skb,
			&rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
						  ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
		 * So we don't want to do it here in this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (1);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}
/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
		      const u8 *addr,
		      u16 tid,
		      u16 *ssn)
{
	struct ath_arx_tid *rxtid;
	struct ath_node *an;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Node not found to initialize RX aggregation\n",
			__func__);
		return -1;
	}

	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor; /* FIXME */

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock_bh(&rxtid->tidlock);
	if (sc->sc_flags & SC_OP_RXAGGR) {
		/* Allow aggregation reception
		 * Adjust rx BA window size. Peer might indicate a
		 * zero buffer size for a _dont_care_ condition.
		 */
		if (buffersize)
			rxtid->baw_size = min(buffersize, rxtid->baw_size);

		/* set rx sequence number */
		rxtid->seq_next = *ssn;

		/* Allocate the receive buffers for this TID */
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);

		if (rxtid->rxbuf == NULL) {
			/*
			 * If the rxbuff is not NULL at this point, we *probably*
			 * already allocated the buffer on a previous ADDBA,
			 * and this is a subsequent ADDBA that got through.
			 * Don't allocate, but use the value in the pointer,
			 * we zero it out when we de-allocate.
			 */
			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf), GFP_ATOMIC);
		}
		if (rxtid->rxbuf == NULL) {
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Unable to allocate RX buffer, "
				"refusing ADDBA\n", __func__);
		} else {
			/* Ensure the memory is zeroed out (all internal
			 * pointers are null) */
			memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf));
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);

			/* Allow aggregation reception */
			rxtid->addba_exchangecomplete = 1;
		}
	}
	spin_unlock_bh(&rxtid->tidlock);

	return 0;
}
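
/*
 * Sizing note for the ADDBA path above: buffersize is
 * IEEE80211_MIN_AMPDU_BUF shifted left by the peer's ampdu_factor
 * (e.g. a factor of 3 multiplies the minimum by 8), and the existing
 * baw_size is only ever shrunk by the min() above, never grown beyond
 * what ath_rx_node_init() configured.
 */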
int ath_rx_aggr_stop(struct ath_softc *sc,
		     const u8 *addr,
		     u16 tid)
{
	struct ath_node *an;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: RX aggr stop for non-existent node\n", __func__);
		return -1;
	}

	ath_rx_aggr_teardown(sc, an, tid);
	return 0;
}
/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

	if (!rxtid->addba_exchangecomplete)
		return;

	del_timer_sync(&rxtid->timer);
	ath_rx_flush_tid(sc, rxtid, 0);
	rxtid->addba_exchangecomplete = 0;

	/* De-allocate the receive buffer array allocated when addba started */

	if (rxtid->rxbuf) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Deallocating TID %d rxbuff @%p\n",
			__func__, tid, rxtid->rxbuf);
		kfree(rxtid->rxbuf);

		/* Set pointer to null to avoid reuse */
		rxtid->rxbuf = NULL;
	}
}
/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_flags & SC_OP_RXAGGR) {
		struct ath_arx_tid *rxtid;
		int tidno;

		/* Init per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
				tidno < WME_NUM_TID;
				tidno++, rxtid++) {
			rxtid->an = an;
			rxtid->seq_reset = 1;
			rxtid->seq_next = 0;
			rxtid->baw_size = WME_MAX_BA;
			rxtid->baw_head = rxtid->baw_tail = 0;

			/*
			 * Ensure the buffer pointer is null at this point
			 * (needs to be allocated when addba is received)
			 */
			rxtid->rxbuf = NULL;
			setup_timer(&rxtid->timer, ath_rx_timer,
				(unsigned long)rxtid);
			spin_lock_init(&rxtid->tidlock);

			/* ADDBA state */
			rxtid->addba_exchangecomplete = 0;
		}
	}
}
void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_flags & SC_OP_RXAGGR) {
		struct ath_arx_tid *rxtid;
		int tidno, i;

		/* Clean up per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
				tidno < WME_NUM_TID;
				tidno++, rxtid++) {
			if (!rxtid->addba_exchangecomplete)
				continue;

			/* must cancel timer first */
			del_timer_sync(&rxtid->timer);

			/* drop any pending sub-frames */
			ath_rx_flush_tid(sc, rxtid, 1);

			for (i = 0; i < ATH_TID_MAX_BUFS; i++)
				ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

			rxtid->addba_exchangecomplete = 0;
		}
	}
}
/* Cleanup per-node receive state */

void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
	ath_rx_node_cleanup(sc, an);
}