ath9k: Add QCA956x HW support
drivers/net/wireless/ath/ath9k/recv.c

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);

	sc->rx.rxlink = &ds->ds_link;
}

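/*
 * Re-link the buffer held back on the previous pass and hold the current
 * one instead, so one completed buffer always stays out of the DMA chain
 * (see the matching buf_hold check in ath_get_next_rx_buf()).
 */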
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
			      bool flush)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold, flush);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

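/*
 * EDMA receive path (AR9003-family and later chips): instead of a linked
 * descriptor chain, completed frames are delivered through two hardware
 * FIFOs, a high-priority and a low-priority queue. Each buffer begins
 * with a rx_status_len sized status area filled in by the hardware.
 */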
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rxbuf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	__skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

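/*
 * A single devm_kzalloc() block is carved into nbufs ath_rxbuf entries;
 * each skb is mapped DMA_BIDIRECTIONAL because both the host (clearing
 * the status area before queueing) and the hardware write into it.
 */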
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 rfilt;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	spin_lock_bh(&sc->chan_lock);

	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
	    sc->sc_ah->dynack.enabled)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->cur_chan->nvifs <= 1) &&
	    !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->cur_chan->rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
	    AR_SREV_9561(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	if (ath9k_is_chanctx_enabled() &&
	    test_bit(ATH_OP_SCANNING, &common->op_flags))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	spin_unlock_bh(&sc->chan_lock);

	return rfilt;
}

void ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf, false);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}

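/*
 * Run the RX tasklet in flush mode so that frames already completed by
 * the hardware are drained before the receive path is torn down.
 */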
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

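/*
 * While PS_BEACON_SYNC is set, the first beacon received from our BSS is
 * used to reprogram the beacon timers from a synchronized timestamp;
 * beacons seen on the offchannel context are skipped for this purpose.
 */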
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	bool skip_beacon = false;

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
		if (ath9k_is_chanctx_enabled()) {
			if (sc->cur_chan == &sc->offchannel.chan)
				skip_beacon = true;
		}
#endif

		if (!skip_beacon &&
		    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

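/*
 * Peek at the head of the EDMA RX FIFO and try to complete its status
 * descriptor: -EINPROGRESS means the hardware still owns the buffer,
 * while -EINVAL marks a corrupt descriptor whose buffer (and the one
 * following it) is dropped and re-queued to the hardware.
 */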
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						  struct ath_rx_status *rs,
						  enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

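/*
 * rs_tstamp is only a 32-bit snapshot of the TSF; splice it into the full
 * 64-bit TSF read at the start of the RX tasklet, adjusting by one wrap
 * period when the timestamp and the TSF lie on opposite sides of a 32-bit
 * rollover.
 */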
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
		return -EINVAL;
	}

	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}

/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}

978
21fbbca3
CL
979static void ath9k_apply_ampdu_details(struct ath_softc *sc,
980 struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
981{
982 if (rs->rs_isaggr) {
983 rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
984
985 rxs->ampdu_reference = sc->rx.ampdu_ref;
986
987 if (!rs->rs_moreaggr) {
988 rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
989 sc->rx.ampdu_ref++;
990 }
991
992 if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
993 rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
994 }
995}
996
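/*
 * Main RX processing loop, run for one queue at a time (the high- or
 * low-priority FIFO on EDMA hardware, the legacy descriptor chain
 * otherwise). For every completed buffer: fetch and validate the RX
 * status, allocate a replacement skb before touching the frame, unmap
 * the old buffer, reassemble chained fragments via sc->rx.frag, run
 * powersave and antenna housekeeping, and hand the frame to mac80211.
 * Processing is capped at 512 buffers per invocation.
 */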
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	unsigned int budget = 512;
	struct ieee80211_hdr *hdr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

5a078fcb
OR
1091 ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
1092 rxs, decrypt_error);
be0418ad 1093
0d95521e 1094 if (rs.rs_more) {
15072189 1095 RX_STAT_INC(rx_frags);
0d95521e
FF
1096 /*
1097 * rs_more indicates chained descriptors which can be
1098 * used to link buffers together for a sort of
1099 * scatter-gather operation.
1100 */
1101 if (sc->rx.frag) {
1102 /* too many fragments - cannot handle frame */
1103 dev_kfree_skb_any(sc->rx.frag);
1104 dev_kfree_skb_any(skb);
15072189 1105 RX_STAT_INC(rx_too_many_frags_err);
0d95521e
FF
1106 skb = NULL;
1107 }
1108 sc->rx.frag = skb;
1109 goto requeue;
1110 }
1111
1112 if (sc->rx.frag) {
1113 int space = skb->len - skb_tailroom(hdr_skb);
1114
0d95521e
FF
1115 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1116 dev_kfree_skb(skb);
15072189 1117 RX_STAT_INC(rx_oom_err);
0d95521e
FF
1118 goto requeue_drop_frag;
1119 }
1120
b5447ff9
ED
1121 sc->rx.frag = NULL;
1122
0d95521e
FF
1123 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1124 skb->len);
1125 dev_kfree_skb_any(skb);
1126 skb = hdr_skb;
1127 }
1128
16fe28e9
SM
1129 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
1130 skb_trim(skb, skb->len - 8);
eb840a80 1131
16fe28e9
SM
1132 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1133 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1134 PS_WAIT_FOR_CAB |
1135 PS_WAIT_FOR_PSPOLL_DATA)) ||
1136 ath9k_check_auto_sleep(sc))
1137 ath_rx_ps(sc, skb, rs.is_mybeacon);
1138 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
eb840a80 1139
c3124df7 1140 ath9k_antenna_check(sc, &rs);
21fbbca3 1141 ath9k_apply_ampdu_details(sc, &rs, rxs);
350e2dcb 1142 ath_debug_rate_stats(sc, &rs, skb);
21fbbca3 1143
982e0395
LB
1144 hdr = (struct ieee80211_hdr *)skb->data;
1145 if (ieee80211_is_ack(hdr->frame_control))
1146 ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);
1147
7545daf4 1148 ieee80211_rx(hw, skb);
cc65965c 1149
0d95521e
FF
1150requeue_drop_frag:
1151 if (sc->rx.frag) {
1152 dev_kfree_skb_any(sc->rx.frag);
1153 sc->rx.frag = NULL;
1154 }
cb71d9ba 1155requeue:
a3dc48e8 1156 list_add_tail(&bf->list, &sc->rx.rxbuf);
a3dc48e8 1157
7dd74f5f
FF
1158 if (!edma) {
1159 ath_rx_buf_relink(sc, bf, flush);
3a758134
TH
1160 if (!flush)
1161 ath9k_hw_rxena(ah);
7dd74f5f
FF
1162 } else if (!flush) {
1163 ath_rx_edma_buf_link(sc, qtype);
b5c80475 1164 }
c82552c5
TH
1165
1166 if (!budget--)
1167 break;
be0418ad
S
1168 } while (1);
1169
29ab0b36
RM
1170 if (!(ah->imask & ATH9K_INT_RXEOL)) {
1171 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
72d874c6 1172 ath9k_hw_set_interrupts(ah);
29ab0b36
RM
1173 }
1174
f078f209 1175 return 0;
f078f209 1176}