/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
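/*
 * Annotation: SKB_CB_ATHBUF() stores the owning ath_buf pointer in the
 * skb->cb scratch area so that a buffer queued into the EDMA rx FIFO can
 * be matched back to its ath_buf when the skb is dequeued again.
 */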
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
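/*
 * Illustrative sketch of the resulting chain: after linking buffers A, B
 * and C in that order,
 *
 *   A.ds_link -> B.bf_daddr, B.ds_link -> C.bf_daddr, C.ds_link -> 0
 *
 * and sc->rx.rxlink points at C.ds_link, ready to splice in the next
 * buffer. The final descriptor is deliberately left pointing at null
 * rather than at itself, per the 11n note above.
 */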
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
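/*
 * Note on the buffer layout assumed above: EDMA capable chips write an
 * rx_status_len sized status block at the head of each rx buffer, which is
 * why the size programmed into the hardware is rx_bufsize minus
 * rx_status_len and why the status area is zeroed before a buffer is
 * handed (back) to the FIFO.
 */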
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah,
				 !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	spin_lock_init(&sc->rx.rxbuflock);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}
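/*
 * Illustrative example: a single station interface with no special
 * mac80211 filter flags typically ends up with
 *
 *   ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST | ATH9K_RX_FILTER_MCAST |
 *   ATH9K_RX_FILTER_MYBEACON
 *
 * plus ATH9K_RX_FILTER_COMP_BAR on an HT channel.
 */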
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah,
				 !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
void ath_flushrecv(struct ath_softc *sc)
{
	set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
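/*
 * Illustrative TIM element layout, as walked by the loop above:
 *
 *   [EID=5][len][dtim_count][dtim_period][bitmap_ctrl][partial virtual bitmap]
 *
 * Bit 0 of bitmap_ctrl is the "group traffic buffered" flag; it is only
 * meaningful on a DTIM beacon, hence the dtim_count == 0 check.
 */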
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
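/*
 * Note on the look-ahead above: if the head descriptor reports
 * -EINPROGRESS while the *next* descriptor is already done, the head's
 * status words are assumed corrupted and it is handed back for disposal
 * rather than stalling the ring on a done bit that will never arrive.
 */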
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		  (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		   ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}

		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}
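/*
 * Illustrative example of the rs_rate encoding handled above: an HT MCS 7
 * frame is reported as rs_rate = 0x87 (bit 7 flags HT, the low bits carry
 * the MCS index, so rate_idx = 7), while legacy rates are matched against
 * the band's bitrate table via hw_value / hw_value_short.
 */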
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}
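	/*
	 * Worked example: a QoS data frame has padpos = 26, so
	 * padsize = 26 & 3 = 2. The hardware inserted two pad bytes after
	 * the header; memmove() shifts the 26 header bytes forward over the
	 * pad and skb_pull() then drops the two stale leading bytes, leaving
	 * the payload contiguous with the header.
	 */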
	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;
= false;
1060 /* If handling rx interrupt and flush is in progress => exit */
1061 if (test_bit(SC_OP_RXFLUSH
, &sc
->sc_flags
) && (flush
== 0))
1064 memset(&rs
, 0, sizeof(rs
));
1066 bf
= ath_edma_get_next_rx_buf(sc
, &rs
, qtype
);
1068 bf
= ath_get_next_rx_buf(sc
, &rs
);
1078 * Take frame header from the first fragment and RX status from
1082 hdr_skb
= sc
->rx
.frag
;
1086 hdr
= (struct ieee80211_hdr
*) (hdr_skb
->data
+ rx_status_len
);
1087 rxs
= IEEE80211_SKB_RXCB(hdr_skb
);
1088 if (ieee80211_is_beacon(hdr
->frame_control
)) {
1089 RX_STAT_INC(rx_beacons
);
1090 if (!is_zero_ether_addr(common
->curbssid
) &&
1091 ether_addr_equal(hdr
->addr3
, common
->curbssid
))
1092 rs
.is_mybeacon
= true;
1094 rs
.is_mybeacon
= false;
1097 rs
.is_mybeacon
= false;
1099 if (ieee80211_is_data_present(hdr
->frame_control
) &&
1100 !ieee80211_is_qos_nullfunc(hdr
->frame_control
))
1103 ath_debug_stat_rx(sc
, &rs
);
1106 * If we're asked to flush receive queue, directly
1107 * chain it back at the queue without processing it.
1109 if (test_bit(SC_OP_RXFLUSH
, &sc
->sc_flags
)) {
1110 RX_STAT_INC(rx_drop_rxflush
);
1111 goto requeue_drop_frag
;
1114 memset(rxs
, 0, sizeof(struct ieee80211_rx_status
));
1116 rxs
->mactime
= (tsf
& ~0xffffffffULL
) | rs
.rs_tstamp
;
1117 if (rs
.rs_tstamp
> tsf_lower
&&
1118 unlikely(rs
.rs_tstamp
- tsf_lower
> 0x10000000))
1119 rxs
->mactime
-= 0x100000000ULL
;
1121 if (rs
.rs_tstamp
< tsf_lower
&&
1122 unlikely(tsf_lower
- rs
.rs_tstamp
> 0x10000000))
1123 rxs
->mactime
+= 0x100000000ULL
;
		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}