drivers/net/wireless/ath9k/recv.c
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah, ds,
			     skb_tailroom(skb), /* buffer size */
			     0);

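	/*
	 * If this is the first buffer in the chain, hand its DMA address
	 * to the hardware directly; otherwise append it to the previous
	 * descriptor's link field. Either way, remember where the next
	 * link pointer lives so the following buffer can be chained in.
	 */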
	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

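	/*
	 * Illustrative numbers: with sc_cachelsz == 32 and skb->data at an
	 * address ending in 0x...14, off == 20, so we reserve 32 - 20 = 12
	 * bytes and the buffer starts on a cache-line boundary.
	 */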
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}

static void ath_rx_requeue(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;

	ASSERT(bf != NULL);

	if (bf->bf_mpdu == NULL) {
		skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
		if (skb != NULL) {
			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					skb_end_pointer(skb) - skb->head,
					PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
	}

	list_move_tail(&bf->list, &sc->sc_rxbuf);
	ath_rx_buf_link(sc, bf);
}

static int ath_rate2idx(struct ath_softc *sc, int rate)
{
	int i = 0, cur_band, n_rates;
	struct ieee80211_hw *hw = sc->hw;

	cur_band = hw->conf.channel->band;
	n_rates = sc->sbands[cur_band].n_bitrates;

	for (i = 0; i < n_rates; i++) {
		if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
			break;
	}

	/*
	 * NB: mac80211 validates the rx rate index against the supported
	 * legacy rate indices only (it should be validated against HT rates
	 * as well), so return the highest legacy rate index for an rx rate
	 * that does not match any of the supported basic and extended rates,
	 * to keep mac80211 happy. The following hack will be cleaned up once
	 * the issue with rx rate index validation in mac80211 is fixed.
	 */
	if (i == n_rates)
		return n_rates - 1;

	return i;
}

/*
 * For decrypt or demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it a missing decryption key or a real decryption error. This lets us
 * keep the statistics there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status,
			  bool *decrypt_error, struct ath_softc *sc)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	struct ieee80211_hdr *hdr;
	int ratekbps, rix;
	u8 ratecode;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in monitor mode.
		 */
		if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes we get invalid MIC failures
				 * on valid control frames. Remove these
				 * MIC errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of decryption and
		 * MIC failures. For monitor mode, we also ignore the CRC
		 * error.
		 */
		if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;
	rix = rate_table->rateCodeToIndex[ratecode];
	ratekbps = rate_table->info[rix].ratekbps;

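	/*
	 * Scale the base rate for HT frames. Assumed rationale, from the
	 * 802.11n PHY parameters: HT40 carries 108 data subcarriers versus
	 * 52 for HT20, hence the 27/13 factor; the short guard interval
	 * shortens the OFDM symbol from 4.0 us to 3.6 us, hence 10/9.
	 */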
	/* HT rate */
	if (ratecode & 0x80) {
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			ratekbps = (ratekbps * 27) / 13;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			ratekbps = (ratekbps * 10) / 9;
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = sc->hw->conf.channel->band;
	rx_status->freq = sc->hw->conf.channel->center_freq;
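	/*
	 * rs_rssi appears to be reported relative to the noise floor, so
	 * adding the current noise-floor estimate yields an absolute
	 * signal level (my reading of the hardware convention).
	 */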
	rx_status->noise = sc->sc_ani.sc_noise_floor;
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100));
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/*
	 * At an RSSI of 45 you will be able to use MCS 15 reliably. A more
	 * elaborate scheme could be used here, but it would require tables
	 * of SNR/throughput for each possible mode used.
	 */
	rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;

	/*
	 * RSSI can be more than 45 though; anything above that should be
	 * considered 100%.
	 */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter; ~0 accepts all multicast */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);

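		/*
		 * Round the rx buffer size up to a multiple of the cache
		 * line size (capped at 64 bytes) so DMA'd frames stay
		 * cache-line aligned; see ath_rxbuf_alloc().
		 */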
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz, (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					skb_end_pointer(skb) - skb->head,
					PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent the hardware from sending ACKs */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
	    sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/*
	 * If in HOSTAP mode, we want to enable reception of PSPOLL
	 * frames and beacon frames.
	 */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU receive */
	ath9k_hw_setrxfilter(ah, 0);	/* clear the rx filter */
	stopped = ath9k_hw_stopdmarecv(ah); /* stop the rx DMA engine */
	mdelay(3);			/* 3 ms is long enough for 1 frame */
	sc->sc_rxlink = NULL;		/* just in case */

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
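/*
 * PA2DESC() maps a descriptor's physical (DMA) address back to its kernel
 * virtual address, using its offset from the start of the coherent
 * descriptor memory.
 */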
#define PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL;
	struct ieee80211_rx_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;

	spin_lock_bh(&sc->sc_rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				sc->sc_rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * If we're asked to flush the receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto rx_next;

		if (!ds->ds_rxstat.rs_datalen)
			goto rx_next;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto rx_next;

		/* Sync and unmap the frame */
		pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev, bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* See if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);

		if (hdrlen & 3) {
			padsize = hdrlen % 4;
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}
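		/*
		 * When the header length is not a multiple of 4, the
		 * hardware inserts pad bytes between the 802.11 header and
		 * the payload; the memmove above slides the header forward
		 * over the pad, and skb_pull() then drops the now-unused
		 * leading bytes.
		 */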

		keyix = ds->ds_rxstat.rs_keyix;

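		/*
		 * Mark the frame as decrypted if the hardware used a valid
		 * key. Failing that, for protected frames, recover the key
		 * ID from the top two bits of the fourth IV octet and check
		 * it against our key map.
		 */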
		if ((keyix != ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;

			if (test_bit(keyix, sc->sc_keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}

		/* Send the frame to mac80211 */
		__ieee80211_rx(sc->hw, skb, &rx_status);
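		/*
		 * mac80211 has taken ownership of the skb; drop our
		 * reference so ath_rx_requeue() allocates a fresh buffer
		 * for this ath_buf.
		 */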
		bf->bf_mpdu = NULL;

		/*
		 * Change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}
rx_next:
		ath_rx_requeue(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->sc_rxbuflock);

	return 0;
#undef PA2DESC
}