ath9k: Fix ath_startrecv()
drivers/net/wireless/ath/ath9k/recv.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);

	sc->rx.rxlink = &ds->ds_link;
}

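/*
 * Link the previously held buffer back into the DMA chain and keep the
 * current buffer back as the new hold buffer.  Keeping one completed
 * buffer out of the chain avoids handing the hardware a descriptor that
 * software is still processing (ath_get_next_rx_buf() likewise stops at
 * buf_hold).
 */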
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
			      bool flush)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold, flush);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

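/*
 * Push one free buffer from sc->rx.rxbuf into the EDMA RX FIFO of the
 * given queue.  Returns false once the FIFO has reached the hardware
 * queue depth, so callers can stop refilling.
 */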
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rxbuf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	__skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

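/*
 * Allocate and map the RX buffers for EDMA-capable chips.  The hardware
 * prepends an RX status block to each frame, so the usable buffer size
 * is rx_bufsize minus rx_status_len, and separate high- and low-priority
 * FIFOs are initialized to their hardware queue depths.
 */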
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

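/*
 * Top-level RX setup: EDMA-capable hardware gets FIFO-based buffers via
 * ath_rx_edma_init(), everything else gets a classic descriptor list
 * built with ath_descdma_setup() and one mapped skb per descriptor.
 */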
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 rfilt;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	if (ath9k_is_chanctx_enabled() &&
	    test_bit(ATH_OP_SCANNING, &common->op_flags))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	return rfilt;
}

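/*
 * (Re)arm reception.  For non-EDMA hardware every buffer on sc->rx.rxbuf
 * is linked into the descriptor chain, the first descriptor is handed to
 * the hardware and RX DMA is enabled before the PCU is started.
 */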
void ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf, false);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}

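/*
 * Drain frames that completed before RX was stopped.  The tasklet is
 * called with flush set, so buffers are processed and requeued without
 * re-enabling RX DMA.
 */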
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

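/*
 * Stop reception: abort the PCU, clear the RX filter, stop RX DMA and
 * flush whatever already completed.  Returns true only if DMA stopped
 * cleanly and no chip reset is required.
 */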
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");
		if (!(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

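/*
 * Power-save bookkeeping on the RX path: process our own beacons and
 * buffered broadcast/multicast (CAB) traffic while waiting to go back to
 * sleep, and clear the PS-Poll wait flag once the solicited frame arrives.
 */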
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

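/*
 * Peek at the head of the EDMA RX FIFO and try to parse its status.
 * -EINPROGRESS means the hardware is not done with the buffer yet;
 * -EINVAL marks a corrupt descriptor, in which case this buffer and the
 * one following it are dropped and requeued.
 */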
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						  struct ath_rx_status *rs,
						  enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

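/*
 * Extend the 32-bit RX timestamp to a full 64-bit mactime using the
 * current TSF, compensating for the case where the TSF low word wrapped
 * between the time the frame was received and the time it is processed.
 */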
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it a
 * missing decryption key or a real decryption error. This lets us keep the
 * statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen, so an overly large rs_datalen is a
	 * hint that the hardware corrupted the descriptor; ignore such
	 * frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
		return -EINVAL;

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
		return -EINVAL;
	}

	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc, rx_stats->rs_tstamp,
						   ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}

/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}

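/*
 * Translate the hardware aggregation flags into mac80211 A-MPDU RX
 * status: all subframes of one aggregate share an ampdu_reference, the
 * last subframe is flagged, and delimiter CRC errors are reported.
 */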
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}

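/*
 * Main RX processing loop, shared by the legacy and EDMA paths.  For
 * each completed buffer: validate the RX status, allocate a replacement
 * skb so the descriptor can be re-armed immediately, reassemble
 * fragments, do power-save and antenna bookkeeping, and hand the frame
 * to mac80211.  The budget bounds the number of frames handled per
 * invocation.
 */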
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	unsigned int budget = 512;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);

		if (!edma) {
			ath_rx_buf_relink(sc, bf, flush);
			if (!flush)
				ath9k_hw_rxena(ah);
		} else if (!flush) {
			ath_rx_edma_buf_link(sc, qtype);
		}

		if (!budget--)
			break;
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}