ath9k: Node cleanup
drivers/net/wireless/ath9k/core.c
1 /*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19 #include "core.h"
20 #include "regd.h"
21
22 static int ath_outdoor; /* enable outdoor use */
23
24 static u32 ath_chainmask_sel_up_rssi_thres =
25 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
26 static u32 ath_chainmask_sel_down_rssi_thres =
27 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
28 static u32 ath_chainmask_sel_period =
29 ATH_CHAINMASK_SEL_TIMEOUT;
30
31 /* return bus cachesize in 4B word units */
32
33 static void bus_read_cachesize(struct ath_softc *sc, int *csz)
34 {
35 u8 u8tmp;
36
37 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
38 *csz = (int)u8tmp;
39
40 /*
41 * This check was put in to avoid "unpleasant" consequences if
42 * the bootrom has not fully initialized all PCI devices.
43 * Sometimes the cache line size register is not set.
44 */
45
46 if (*csz == 0)
47 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
48 }
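/*
 * Illustrative note (not driver code): the PCI cache line size
 * register counts 32-bit words, so a register value of 16 means a
 * 64-byte cache line. ath_init() converts the word count back to
 * bytes before using it:
 *
 *	int csz;
 *	bus_read_cachesize(sc, &csz);	// e.g. csz == 16 words
 *	sc->sc_cachelsz = csz << 2;	// 16 words -> 64 bytes
 */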
49
50 static u8 parse_mpdudensity(u8 mpdudensity)
51 {
52 /*
53 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
54 * 0 for no restriction
55 * 1 for 1/4 us
56 * 2 for 1/2 us
57 * 3 for 1 us
58 * 4 for 2 us
59 * 5 for 4 us
60 * 6 for 8 us
61 * 7 for 16 us
62 */
63 switch (mpdudensity) {
64 case 0:
65 return 0;
66 case 1:
67 case 2:
68 case 3:
69 /* Our lower layer calculations limit our precision to
70 1 microsecond */
71 return 1;
72 case 4:
73 return 2;
74 case 5:
75 return 4;
76 case 6:
77 return 8;
78 case 7:
79 return 16;
80 default:
81 return 0;
82 }
83 }
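/*
 * Worked example (illustrative): a peer advertising an MPDU density
 * field of 5 requires 4 us between MPDU starts, so
 * parse_mpdudensity(5) == 4. The sub-microsecond encodings 1 and 2
 * (1/4 us and 1/2 us) both round up to 1 us, matching the 1 us
 * precision limit noted above.
 */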
84
85 /*
86 * Set current operating mode
87 *
88 * This function initializes and fills the rate table in the ATH object based
89 * on the operating mode.
90 */
91 static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
92 {
93 const struct ath9k_rate_table *rt;
94 int i;
95
96 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
97 rt = ath9k_hw_getratetable(sc->sc_ah, mode);
98 BUG_ON(!rt);
99
100 for (i = 0; i < rt->rateCount; i++)
101 sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
102
103 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
104 for (i = 0; i < 256; i++) {
105 u8 ix = rt->rateCodeToIndex[i];
106
107 if (ix == 0xff)
108 continue;
109
110 sc->sc_hwmap[i].ieeerate =
111 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
112 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
113
114 if (rt->info[ix].shortPreamble ||
115 rt->info[ix].phy == PHY_OFDM) {
116 /* XXX: Handle this */
117 }
118
119 /* NB: this uses the last entry if the rate isn't found */
120 /* XXX beware of overflow */
121 }
122 sc->sc_currates = rt;
123 sc->sc_curmode = mode;
124 /*
125 * All protection frames are transmitted at 2Mb/s for
126 * 11g, otherwise at 1Mb/s.
127 * XXX select protection rate index from rate table.
128 */
129 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
130 }
131
132 /*
133 * Set up rate table (legacy rates)
134 */
135 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
136 {
137 struct ath_hal *ah = sc->sc_ah;
138 const struct ath9k_rate_table *rt = NULL;
139 struct ieee80211_supported_band *sband;
140 struct ieee80211_rate *rate;
141 int i, maxrates;
142
143 switch (band) {
144 case IEEE80211_BAND_2GHZ:
145 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
146 break;
147 case IEEE80211_BAND_5GHZ:
148 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
149 break;
150 default:
151 break;
152 }
153
154 if (rt == NULL)
155 return;
156
157 sband = &sc->sbands[band];
158 rate = sc->rates[band];
159
160 if (rt->rateCount > ATH_RATE_MAX)
161 maxrates = ATH_RATE_MAX;
162 else
163 maxrates = rt->rateCount;
164
165 for (i = 0; i < maxrates; i++) {
166 rate[i].bitrate = rt->info[i].rateKbps / 100;
167 rate[i].hw_value = rt->info[i].rateCode;
168 sband->n_bitrates++;
169 DPRINTF(sc, ATH_DBG_CONFIG,
170 "%s: Rate: %2dMbps, ratecode: %2d\n",
171 __func__,
172 rate[i].bitrate / 10,
173 rate[i].hw_value);
174 }
175 }
176
177 /*
178 * Set up channel list
179 */
180 static int ath_setup_channels(struct ath_softc *sc)
181 {
182 struct ath_hal *ah = sc->sc_ah;
183 int nchan, i, a = 0, b = 0;
184 u8 regclassids[ATH_REGCLASSIDS_MAX];
185 u32 nregclass = 0;
186 struct ieee80211_supported_band *band_2ghz;
187 struct ieee80211_supported_band *band_5ghz;
188 struct ieee80211_channel *chan_2ghz;
189 struct ieee80211_channel *chan_5ghz;
190 struct ath9k_channel *c;
191
192 /* Fill in ah->ah_channels */
193 if (!ath9k_regd_init_channels(ah,
194 ATH_CHAN_MAX,
195 (u32 *)&nchan,
196 regclassids,
197 ATH_REGCLASSIDS_MAX,
198 &nregclass,
199 CTRY_DEFAULT,
200 false,
201 1)) {
202 u32 rd = ah->ah_currentRD;
203
204 DPRINTF(sc, ATH_DBG_FATAL,
205 "%s: unable to collect channel list; "
206 "regdomain likely %u country code %u\n",
207 __func__, rd, CTRY_DEFAULT);
208 return -EINVAL;
209 }
210
211 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
212 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
213 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
214 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
215
216 for (i = 0; i < nchan; i++) {
217 c = &ah->ah_channels[i];
218 if (IS_CHAN_2GHZ(c)) {
219 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
220 chan_2ghz[a].center_freq = c->channel;
221 chan_2ghz[a].max_power = c->maxTxPower;
222
223 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
224 chan_2ghz[a].flags |=
225 IEEE80211_CHAN_NO_IBSS;
226 if (c->channelFlags & CHANNEL_PASSIVE)
227 chan_2ghz[a].flags |=
228 IEEE80211_CHAN_PASSIVE_SCAN;
229
230 band_2ghz->n_channels = ++a;
231
232 DPRINTF(sc, ATH_DBG_CONFIG,
233 "%s: 2GHz channel: %d, "
234 "channelFlags: 0x%x\n",
235 __func__,
236 c->channel,
237 c->channelFlags);
238 } else if (IS_CHAN_5GHZ(c)) {
239 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
240 chan_5ghz[b].center_freq = c->channel;
241 chan_5ghz[b].max_power = c->maxTxPower;
242
243 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
244 chan_5ghz[b].flags |=
245 IEEE80211_CHAN_NO_IBSS;
246 if (c->channelFlags & CHANNEL_PASSIVE)
247 chan_5ghz[b].flags |=
248 IEEE80211_CHAN_PASSIVE_SCAN;
249
250 band_5ghz->n_channels = ++b;
251
252 DPRINTF(sc, ATH_DBG_CONFIG,
253 "%s: 5GHz channel: %d, "
254 "channelFlags: 0x%x\n",
255 __func__,
256 c->channel,
257 c->channelFlags);
258 }
259 }
260
261 return 0;
262 }
263
264 /*
265 * Determine mode from channel flags
266 *
267 * This routine will provide the enumerated wireless_mode value based
268 * on the settings of the channel flags. If no valid set of flags
269 * exist, the lowest mode (11b) is selected.
270 */
271
272 static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
273 {
274 if (chan->chanmode == CHANNEL_A)
275 return ATH9K_MODE_11A;
276 else if (chan->chanmode == CHANNEL_G)
277 return ATH9K_MODE_11G;
278 else if (chan->chanmode == CHANNEL_B)
279 return ATH9K_MODE_11B;
280 else if (chan->chanmode == CHANNEL_A_HT20)
281 return ATH9K_MODE_11NA_HT20;
282 else if (chan->chanmode == CHANNEL_G_HT20)
283 return ATH9K_MODE_11NG_HT20;
284 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
285 return ATH9K_MODE_11NA_HT40PLUS;
286 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
287 return ATH9K_MODE_11NA_HT40MINUS;
288 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
289 return ATH9K_MODE_11NG_HT40PLUS;
290 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
291 return ATH9K_MODE_11NG_HT40MINUS;
292
293 WARN_ON(1); /* should not get here */
294
295 return ATH9K_MODE_11B;
296 }
297
298 /*
299 * Stop the device, grabbing the top-level lock to protect
300 * against concurrent entry through ath_init (which can happen
301 * if another thread does a system call and the thread doing the
302 * stop is preempted).
303 */
304
305 static int ath_stop(struct ath_softc *sc)
306 {
307 struct ath_hal *ah = sc->sc_ah;
308
309 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
310 __func__, sc->sc_flags & SC_OP_INVALID);
311
312 /*
313 * Shutdown the hardware and driver:
314 * stop output from above
315 * turn off timers
316 * disable interrupts
317 * clear transmit machinery
318 * clear receive machinery
319 * turn off the radio
320 * reclaim beacon resources
321 *
322 * Note that some of this work is not possible if the
323 * hardware is gone (invalid).
324 */
325
326 ath_draintxq(sc, false);
327 if (!(sc->sc_flags & SC_OP_INVALID)) {
328 ath_stoprecv(sc);
329 ath9k_hw_phy_disable(ah);
330 } else
331 sc->sc_rxlink = NULL;
332
333 return 0;
334 }
335
336 /*
337 * Set the current channel
338 *
339 * Set/change channels. If the channel is really being changed, it's done
340 * by resetting the chip. To accomplish this we must first clean up any
341 * pending DMA, then restart things as in ath_init.
342 */
343 int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
344 {
345 struct ath_hal *ah = sc->sc_ah;
346 bool fastcc = true, stopped;
347
348 if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
349 return -EIO;
350
351 DPRINTF(sc, ATH_DBG_CONFIG,
352 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
353 __func__,
354 ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
355 sc->sc_ah->ah_curchan->channelFlags),
356 sc->sc_ah->ah_curchan->channel,
357 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
358 hchan->channel, hchan->channelFlags);
359
360 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
361 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
362 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
363 (sc->sc_flags & SC_OP_FULL_RESET)) {
364 int status;
365 /*
366 * This is only performed if the channel settings have
367 * actually changed.
368 *
369 * To switch channels clear any pending DMA operations;
370 * wait long enough for the RX fifo to drain, reset the
371 * hardware at the new frequency, and then re-enable
372 * the relevant bits of the h/w.
373 */
374 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
375 ath_draintxq(sc, false); /* clear pending tx frames */
376 stopped = ath_stoprecv(sc); /* turn off frame recv */
377
378 /* XXX: do not flush receive queue here. We don't want
379 * to flush data frames already in queue because of
380 * changing channel. */
381
382 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
383 fastcc = false;
384
385 spin_lock_bh(&sc->sc_resetlock);
386 if (!ath9k_hw_reset(ah, hchan,
387 sc->sc_ht_info.tx_chan_width,
388 sc->sc_tx_chainmask,
389 sc->sc_rx_chainmask,
390 sc->sc_ht_extprotspacing,
391 fastcc, &status)) {
392 DPRINTF(sc, ATH_DBG_FATAL,
393 "%s: unable to reset channel %u (%u MHz) "
394 "flags 0x%x hal status %u\n", __func__,
395 ath9k_hw_mhz2ieee(ah, hchan->channel,
396 hchan->channelFlags),
397 hchan->channel, hchan->channelFlags, status);
398 spin_unlock_bh(&sc->sc_resetlock);
399 return -EIO;
400 }
401 spin_unlock_bh(&sc->sc_resetlock);
402
403 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
404 sc->sc_flags &= ~SC_OP_FULL_RESET;
405
406 /* Re-enable rx framework */
407 if (ath_startrecv(sc) != 0) {
408 DPRINTF(sc, ATH_DBG_FATAL,
409 "%s: unable to restart recv logic\n", __func__);
410 return -EIO;
411 }
412 /*
413 * Change channels and update the h/w rate map
414 * if we're switching; e.g. 11a to 11b/g.
415 */
416 ath_setcurmode(sc, ath_chan2mode(hchan));
417
418 ath_update_txpow(sc); /* update tx power state */
419 /*
420 * Re-enable interrupts.
421 */
422 ath9k_hw_set_interrupts(ah, sc->sc_imask);
423 }
424 return 0;
425 }
426
427 /**********************/
428 /* Chainmask Handling */
429 /**********************/
430
431 static void ath_chainmask_sel_timertimeout(unsigned long data)
432 {
433 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
434 cm->switch_allowed = 1;
435 }
436
437 /* Start chainmask select timer */
438 static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
439 {
440 cm->switch_allowed = 0;
441 mod_timer(&cm->timer, ath_chainmask_sel_period);
442 }
443
444 /* Stop chainmask select timer */
445 static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
446 {
447 cm->switch_allowed = 0;
448 del_timer_sync(&cm->timer);
449 }
450
451 static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
452 {
453 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
454
455 memset(cm, 0, sizeof(struct ath_chainmask_sel));
456
457 cm->cur_tx_mask = sc->sc_tx_chainmask;
458 cm->cur_rx_mask = sc->sc_rx_chainmask;
459 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
460 setup_timer(&cm->timer,
461 ath_chainmask_sel_timertimeout, (unsigned long) cm);
462 }
463
464 int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
465 {
466 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
467
468 /*
469 * Disable auto-switching in any of the following conditions.
470 * sc_chainmask_auto_sel is the internal global auto-switching
471 * enable/disable setting.
472 */
473 if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
474 cm->cur_tx_mask = sc->sc_tx_chainmask;
475 return cm->cur_tx_mask;
476 }
477
478 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
479 return cm->cur_tx_mask;
480
481 if (cm->switch_allowed) {
482 /* Switch down from tx 3 to tx 2. */
483 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
484 ATH_RSSI_OUT(cm->tx_avgrssi) >=
485 ath_chainmask_sel_down_rssi_thres) {
486 cm->cur_tx_mask = sc->sc_tx_chainmask;
487
488 /* Don't let another switch happen until
489 * this timer expires */
490 ath_chainmask_sel_timerstart(cm);
491 }
492 /* Switch up from tx 2 to 3. */
493 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
494 ATH_RSSI_OUT(cm->tx_avgrssi) <=
495 ath_chainmask_sel_up_rssi_thres) {
496 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
497
498 /* Don't let another switch happen
499 * until this timer expires */
500 ath_chainmask_sel_timerstart(cm);
501 }
502 }
503
504 return cm->cur_tx_mask;
505 }
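/*
 * Behaviour summary (illustrative, derived from the logic above): a
 * strong average RSSI (>= ath_chainmask_sel_down_rssi_thres) switches
 * the tx chainmask down from 3x3 to the configured mask, while a weak
 * one (<= ath_chainmask_sel_up_rssi_thres) switches it back up to
 * 3x3. The timer armed on every switch blocks further switches for
 * ATH_CHAINMASK_SEL_TIMEOUT, providing simple hysteresis.
 */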
506
507 /*
508 * Update tx/rx chainmask. For a legacy association,
509 * hard code the chainmask to 1x1; for an 11n association,
510 * use the configured chainmask.
511 */
512
513 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
514 {
515 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
516 if (is_ht) {
517 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
518 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
519 } else {
520 sc->sc_tx_chainmask = 1;
521 sc->sc_rx_chainmask = 1;
522 }
523
524 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
525 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
526 }
527
528 /*******/
529 /* ANI */
530 /*******/
531
532 /*
533 * This routine performs the periodic noise floor calibration function
534 * that is used to adjust and optimize the chip performance. This
535 * takes environmental changes (location, temperature) into account.
536 * When the task is complete, it reschedules itself depending on the
537 * appropriate interval that was calculated.
538 */
539
540 static void ath_ani_calibrate(unsigned long data)
541 {
542 struct ath_softc *sc;
543 struct ath_hal *ah;
544 bool longcal = false;
545 bool shortcal = false;
546 bool aniflag = false;
547 unsigned int timestamp = jiffies_to_msecs(jiffies);
548 u32 cal_interval;
549
550 sc = (struct ath_softc *)data;
551 ah = sc->sc_ah;
552
553 /*
554 * Don't calibrate when we're scanning;
555 * we are most likely not on our home channel.
556 */
557 if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
558 return;
559
560 /* Long calibration runs independently of short calibration. */
561 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
562 longcal = true;
563 DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
564 __func__, jiffies);
565 sc->sc_ani.sc_longcal_timer = timestamp;
566 }
567
568 /* Short calibration applies only while sc_caldone is false */
569 if (!sc->sc_ani.sc_caldone) {
570 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
571 ATH_SHORT_CALINTERVAL) {
572 shortcal = true;
573 DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
574 __func__, jiffies);
575 sc->sc_ani.sc_shortcal_timer = timestamp;
576 sc->sc_ani.sc_resetcal_timer = timestamp;
577 }
578 } else {
579 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
580 ATH_RESTART_CALINTERVAL) {
581 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
582 &sc->sc_ani.sc_caldone);
583 if (sc->sc_ani.sc_caldone)
584 sc->sc_ani.sc_resetcal_timer = timestamp;
585 }
586 }
587
588 /* Verify whether we must check ANI */
589 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
590 ATH_ANI_POLLINTERVAL) {
591 aniflag = true;
592 sc->sc_ani.sc_checkani_timer = timestamp;
593 }
594
595 /* Skip all processing if there's nothing to do. */
596 if (longcal || shortcal || aniflag) {
597 /* Call ANI routine if necessary */
598 if (aniflag)
599 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
600 ah->ah_curchan);
601
602 /* Perform calibration if necessary */
603 if (longcal || shortcal) {
604 bool iscaldone = false;
605
606 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
607 sc->sc_rx_chainmask, longcal,
608 &iscaldone)) {
609 if (longcal)
610 sc->sc_ani.sc_noise_floor =
611 ath9k_hw_getchan_noise(ah,
612 ah->ah_curchan);
613
614 DPRINTF(sc, ATH_DBG_ANI,
615 "%s: calibrate chan %u/%x nf: %d\n",
616 __func__,
617 ah->ah_curchan->channel,
618 ah->ah_curchan->channelFlags,
619 sc->sc_ani.sc_noise_floor);
620 } else {
621 DPRINTF(sc, ATH_DBG_ANY,
622 "%s: calibrate chan %u/%x failed\n",
623 __func__,
624 ah->ah_curchan->channel,
625 ah->ah_curchan->channelFlags);
626 }
627 sc->sc_ani.sc_caldone = iscaldone;
628 }
629 }
630
631 /*
632 * Set timer interval based on previous results.
633 * The interval must be the shortest necessary to satisfy ANI,
634 * short calibration and long calibration.
635 */
636
637 cal_interval = ATH_ANI_POLLINTERVAL;
638 if (!sc->sc_ani.sc_caldone)
639 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);
640
641 mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
642 }
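/*
 * Scheduling sketch (illustrative): the timer is armed once in
 * ath_init() via setup_timer(&sc->sc_ani.timer, ath_ani_calibrate,
 * (unsigned long)sc) and re-arms itself above with
 * mod_timer(..., jiffies + msecs_to_jiffies(cal_interval)), so this
 * callback normally runs every ATH_ANI_POLLINTERVAL ms and drops to
 * the smaller of that and ATH_SHORT_CALINTERVAL while calibration is
 * still incomplete.
 */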
643
644 /******************/
645 /* VAP management */
646 /******************/
647
648 int ath_vap_attach(struct ath_softc *sc,
649 int if_id,
650 struct ieee80211_vif *if_data,
651 enum ath9k_opmode opmode)
652 {
653 struct ath_vap *avp;
654
655 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
656 DPRINTF(sc, ATH_DBG_FATAL,
657 "%s: Invalid interface id = %u\n", __func__, if_id);
658 return -EINVAL;
659 }
660
661 switch (opmode) {
662 case ATH9K_M_STA:
663 case ATH9K_M_IBSS:
664 case ATH9K_M_MONITOR:
665 break;
666 case ATH9K_M_HOSTAP:
667 /* XXX not right, beacon buffer is allocated on RUN trans */
668 if (list_empty(&sc->sc_bbuf))
669 return -ENOMEM;
670 break;
671 default:
672 return -EINVAL;
673 }
674
675 /* create ath_vap */
676 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
677 if (avp == NULL)
678 return -ENOMEM;
679
680 memset(avp, 0, sizeof(struct ath_vap));
681 avp->av_if_data = if_data;
682 /* Set the VAP opmode */
683 avp->av_opmode = opmode;
684 avp->av_bslot = -1;
685
686 if (opmode == ATH9K_M_HOSTAP)
687 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
688
689 sc->sc_vaps[if_id] = avp;
690 sc->sc_nvaps++;
691 /* Set the device opmode */
692 sc->sc_ah->ah_opmode = opmode;
693
694 /* default VAP configuration */
695 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
696 avp->av_config.av_fixed_retryset = 0x03030303;
697
698 return 0;
699 }
700
701 int ath_vap_detach(struct ath_softc *sc, int if_id)
702 {
703 struct ath_hal *ah = sc->sc_ah;
704 struct ath_vap *avp;
705
706 avp = sc->sc_vaps[if_id];
707 if (avp == NULL) {
708 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
709 __func__, if_id);
710 return -EINVAL;
711 }
712
713 /*
714 * Quiesce the hardware while we remove the vap. In
715 * particular we need to reclaim all references to the
716 * vap state by any frames pending on the tx queues.
717 *
718 * XXX can we do this w/o affecting other vap's?
719 */
720 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
721 ath_draintxq(sc, false); /* stop xmit side */
722 ath_stoprecv(sc); /* stop recv side */
723 ath_flushrecv(sc); /* flush recv queue */
724
725 kfree(avp);
726 sc->sc_vaps[if_id] = NULL;
727 sc->sc_nvaps--;
728
729 return 0;
730 }
731
732 int ath_vap_config(struct ath_softc *sc,
733 int if_id, struct ath_vap_config *if_config)
734 {
735 struct ath_vap *avp;
736
737 if (if_id >= ATH_BCBUF) {
738 DPRINTF(sc, ATH_DBG_FATAL,
739 "%s: Invalid interface id = %u\n", __func__, if_id);
740 return -EINVAL;
741 }
742
743 avp = sc->sc_vaps[if_id];
744 ASSERT(avp != NULL);
745
746 if (avp)
747 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
748
749 return 0;
750 }
751
752 /********/
753 /* Core */
754 /********/
755
756 int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
757 {
758 struct ath_hal *ah = sc->sc_ah;
759 int status;
760 int error = 0;
761
762 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
763 __func__, sc->sc_ah->ah_opmode);
764
765 /*
766 * Stop anything previously setup. This is safe
767 * whether this is the first time through or not.
768 */
769 ath_stop(sc);
770
771 /* Initialize chanmask selection */
772 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
773 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
774
775 /* Reset SERDES registers */
776 ath9k_hw_configpcipowersave(ah, 0);
777
778 /*
779 * The basic interface to setting the hardware in a good
780 * state is ``reset''. On return the hardware is known to
781 * be powered up and with interrupts disabled. This must
782 * be followed by initialization of the appropriate bits
783 * and then setup of the interrupt mask.
784 */
785
786 spin_lock_bh(&sc->sc_resetlock);
787 if (!ath9k_hw_reset(ah, initial_chan,
788 sc->sc_ht_info.tx_chan_width,
789 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
790 sc->sc_ht_extprotspacing, false, &status)) {
791 DPRINTF(sc, ATH_DBG_FATAL,
792 "%s: unable to reset hardware; hal status %u "
793 "(freq %u flags 0x%x)\n", __func__, status,
794 initial_chan->channel, initial_chan->channelFlags);
795 error = -EIO;
796 spin_unlock_bh(&sc->sc_resetlock);
797 goto done;
798 }
799 spin_unlock_bh(&sc->sc_resetlock);
800 /*
801 * This is needed only to setup initial state
802 * but it's best done after a reset.
803 */
804 ath_update_txpow(sc);
805
806 /*
807 * Setup the hardware after reset:
808 * The receive engine is set going.
809 * Frame transmit is handled entirely
810 * in the frame output path; there's nothing to do
811 * here except setup the interrupt mask.
812 */
813 if (ath_startrecv(sc) != 0) {
814 DPRINTF(sc, ATH_DBG_FATAL,
815 "%s: unable to start recv logic\n", __func__);
816 error = -EIO;
817 goto done;
818 }
819 /* Setup our intr mask. */
820 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
821 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
822 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
823
824 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
825 sc->sc_imask |= ATH9K_INT_GTT;
826
827 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
828 sc->sc_imask |= ATH9K_INT_CST;
829
830 /*
831 * Enable MIB interrupts when there are hardware phy counters.
832 * Note we only do this (at the moment) for station mode.
833 */
834 if (ath9k_hw_phycounters(ah) &&
835 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
836 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
837 sc->sc_imask |= ATH9K_INT_MIB;
838 /*
839 * Some hardware processes the TIM IE and fires an
840 * interrupt when the TIM bit is set. For hardware
841 * that does, if not overridden by configuration,
842 * enable the TIM interrupt when operating as station.
843 */
844 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
845 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
846 !sc->sc_config.swBeaconProcess)
847 sc->sc_imask |= ATH9K_INT_TIM;
848 /*
849 * Don't enable interrupts here as we've not yet built our
850 * vap and node data structures, which will be needed as soon
851 * as we start receiving.
852 */
853 ath_setcurmode(sc, ath_chan2mode(initial_chan));
854
855 /* XXX: we must make sure h/w is ready and clear invalid flag
856 * before turning on interrupt. */
857 sc->sc_flags &= ~SC_OP_INVALID;
858 done:
859 return error;
860 }
861
862 int ath_reset(struct ath_softc *sc, bool retry_tx)
863 {
864 struct ath_hal *ah = sc->sc_ah;
865 int status;
866 int error = 0;
867
868 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
869 ath_draintxq(sc, retry_tx); /* stop xmit */
870 ath_stoprecv(sc); /* stop recv */
871 ath_flushrecv(sc); /* flush recv queue */
872
873 /* Reset chip */
874 spin_lock_bh(&sc->sc_resetlock);
875 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
876 sc->sc_ht_info.tx_chan_width,
877 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
878 sc->sc_ht_extprotspacing, false, &status)) {
879 DPRINTF(sc, ATH_DBG_FATAL,
880 "%s: unable to reset hardware; hal status %u\n",
881 __func__, status);
882 error = -EIO;
883 }
884 spin_unlock_bh(&sc->sc_resetlock);
885
886 if (ath_startrecv(sc) != 0) /* restart recv */
887 DPRINTF(sc, ATH_DBG_FATAL,
888 "%s: unable to start recv logic\n", __func__);
889
890 /*
891 * We may be doing a reset in response to a request
892 * that changes the channel so update any state that
893 * might change as a result.
894 */
895 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
896
897 ath_update_txpow(sc);
898
899 if (sc->sc_flags & SC_OP_BEACONS)
900 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
901
902 ath9k_hw_set_interrupts(ah, sc->sc_imask);
903
904 /* Restart the txq */
905 if (retry_tx) {
906 int i;
907 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
908 if (ATH_TXQ_SETUP(sc, i)) {
909 spin_lock_bh(&sc->sc_txq[i].axq_lock);
910 ath_txq_schedule(sc, &sc->sc_txq[i]);
911 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
912 }
913 }
914 }
915
916 return error;
917 }
918
919 int ath_suspend(struct ath_softc *sc)
920 {
921 struct ath_hal *ah = sc->sc_ah;
922
923 /* No I/O if device has been surprise removed */
924 if (sc->sc_flags & SC_OP_INVALID)
925 return -EIO;
926
927 /* Shut off the interrupt before setting the SC_OP_INVALID flag */
928 ath9k_hw_set_interrupts(ah, 0);
929
930 /* XXX: we must make sure h/w will not generate any interrupt
931 * before setting the invalid flag. */
932 sc->sc_flags |= SC_OP_INVALID;
933
934 /* disable HAL and put h/w to sleep */
935 ath9k_hw_disable(sc->sc_ah);
936
937 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
938
939 return 0;
940 }
941
942 /* Interrupt handler. Most of the actual processing is deferred.
943 * It's the caller's responsibility to ensure the chip is awake. */
944
945 irqreturn_t ath_isr(int irq, void *dev)
946 {
947 struct ath_softc *sc = dev;
948 struct ath_hal *ah = sc->sc_ah;
949 enum ath9k_int status;
950 bool sched = false;
951
952 do {
953 if (sc->sc_flags & SC_OP_INVALID) {
954 /*
955 * The hardware is not ready/present, don't
956 * touch anything. Note this can happen early
957 * on if the IRQ is shared.
958 */
959 return IRQ_NONE;
960 }
961 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
962 return IRQ_NONE;
963 }
964
965 /*
966 * Figure out the reason(s) for the interrupt. Note
967 * that the hal returns a pseudo-ISR that may include
968 * bits we haven't explicitly enabled so we mask the
969 * value to ensure we only process bits we requested.
970 */
971 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
972
973 status &= sc->sc_imask; /* discard unasked-for bits */
974
975 /*
976 * If there are no status bits set, then this interrupt was not
977 * for me (should have been caught above).
978 */
979
980 if (!status)
981 return IRQ_NONE;
982
983 sc->sc_intrstatus = status;
984
985 if (status & ATH9K_INT_FATAL) {
986 /* need a chip reset */
987 sched = true;
988 } else if (status & ATH9K_INT_RXORN) {
989 /* need a chip reset */
990 sched = true;
991 } else {
992 if (status & ATH9K_INT_SWBA) {
993 /* schedule a tasklet for beacon handling */
994 tasklet_schedule(&sc->bcon_tasklet);
995 }
996 if (status & ATH9K_INT_RXEOL) {
997 /*
998 * NB: the hardware should re-read the link when
999 * RXE bit is written, but it doesn't work
1000 * at least on older hardware revs.
1001 */
1002 sched = true;
1003 }
1004
1005 if (status & ATH9K_INT_TXURN)
1006 /* bump tx trigger level */
1007 ath9k_hw_updatetxtriglevel(ah, true);
1008 /* XXX: optimize this */
1009 if (status & ATH9K_INT_RX)
1010 sched = true;
1011 if (status & ATH9K_INT_TX)
1012 sched = true;
1013 if (status & ATH9K_INT_BMISS)
1014 sched = true;
1015 /* carrier sense timeout */
1016 if (status & ATH9K_INT_CST)
1017 sched = true;
1018 if (status & ATH9K_INT_MIB) {
1019 /*
1020 * Disable interrupts until we service the MIB
1021 * interrupt; otherwise it will continue to
1022 * fire.
1023 */
1024 ath9k_hw_set_interrupts(ah, 0);
1025 /*
1026 * Let the hal handle the event. We assume
1027 * it will clear whatever condition caused
1028 * the interrupt.
1029 */
1030 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1031 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1032 }
1033 if (status & ATH9K_INT_TIM_TIMER) {
1034 if (!(ah->ah_caps.hw_caps &
1035 ATH9K_HW_CAP_AUTOSLEEP)) {
1036 /* Clear RxAbort bit so that we can
1037 * receive frames */
1038 ath9k_hw_setrxabort(ah, 0);
1039 sched = true;
1040 }
1041 }
1042 }
1043 } while (0);
1044
1045 if (sched) {
1046 /* turn off every interrupt except SWBA */
1047 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
1048 tasklet_schedule(&sc->intr_tq);
1049 }
1050
1051 return IRQ_HANDLED;
1052 }
1053
1054 /* Deferred interrupt processing */
1055
1056 static void ath9k_tasklet(unsigned long data)
1057 {
1058 struct ath_softc *sc = (struct ath_softc *)data;
1059 u32 status = sc->sc_intrstatus;
1060
1061 if (status & ATH9K_INT_FATAL) {
1062 /* need a chip reset */
1063 ath_reset(sc, false);
1064 return;
1065 } else {
1066
1067 if (status &
1068 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1069 /* XXX: fill me in */
1070 /*
1071 if (status & ATH9K_INT_RXORN) {
1072 }
1073 if (status & ATH9K_INT_RXEOL) {
1074 }
1075 */
1076 spin_lock_bh(&sc->sc_rxflushlock);
1077 ath_rx_tasklet(sc, 0);
1078 spin_unlock_bh(&sc->sc_rxflushlock);
1079 }
1080 /* XXX: optimize this */
1081 if (status & ATH9K_INT_TX)
1082 ath_tx_tasklet(sc);
1083 /* XXX: fill me in */
1084 /*
1085 if (status & ATH9K_INT_BMISS) {
1086 }
1087 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1088 if (status & ATH9K_INT_TIM) {
1089 }
1090 if (status & ATH9K_INT_DTIMSYNC) {
1091 }
1092 }
1093 */
1094 }
1095
1096 /* re-enable hardware interrupt */
1097 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1098 }
1099
1100 int ath_init(u16 devid, struct ath_softc *sc)
1101 {
1102 struct ath_hal *ah = NULL;
1103 int status;
1104 int error = 0, i;
1105 int csz = 0;
1106
1107 /* XXX: hardware will not be ready until ath_open() is called */
1108 sc->sc_flags |= SC_OP_INVALID;
1109
1110 sc->sc_debug = DBG_DEFAULT;
1111 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1112
1113 /* Initialize tasklet */
1114 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1115 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1116 (unsigned long)sc);
1117
1118 /*
1119 * Cache line size is used to size and align various
1120 * structures used to communicate with the hardware.
1121 */
1122 bus_read_cachesize(sc, &csz);
1123 /* XXX assert csz is non-zero */
1124 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1125
1126 spin_lock_init(&sc->sc_resetlock);
1127
1128 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1129 if (ah == NULL) {
1130 DPRINTF(sc, ATH_DBG_FATAL,
1131 "%s: unable to attach hardware; HAL status %u\n",
1132 __func__, status);
1133 error = -ENXIO;
1134 goto bad;
1135 }
1136 sc->sc_ah = ah;
1137
1138 /* Initializes the noise floor to a reasonable default value.
1139 * Later on this will be updated during ANI processing. */
1140 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1141
1142 /* Get the hardware key cache size. */
1143 sc->sc_keymax = ah->ah_caps.keycache_size;
1144 if (sc->sc_keymax > ATH_KEYMAX) {
1145 DPRINTF(sc, ATH_DBG_KEYCACHE,
1146 "%s: Warning, using only %u entries in %u key cache\n",
1147 __func__, ATH_KEYMAX, sc->sc_keymax);
1148 sc->sc_keymax = ATH_KEYMAX;
1149 }
1150
1151 /*
1152 * Reset the key cache since some parts do not
1153 * reset the contents on initial power up.
1154 */
1155 for (i = 0; i < sc->sc_keymax; i++)
1156 ath9k_hw_keyreset(ah, (u16) i);
1157 /*
1158 * Mark key cache slots associated with global keys
1159 * as in use. If we knew TKIP was not to be used we
1160 * could leave the +32, +64, and +32+64 slots free.
1161 * XXX only for splitmic.
1162 */
1163 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1164 set_bit(i, sc->sc_keymap);
1165 set_bit(i + 32, sc->sc_keymap);
1166 set_bit(i + 64, sc->sc_keymap);
1167 set_bit(i + 32 + 64, sc->sc_keymap);
1168 }
1169 /*
1170 * Collect the channel list using the default country
1171 * code and including outdoor channels. The 802.11 layer
1172 * is responsible for filtering this list based on settings
1173 * like the phy mode.
1174 */
1175 error = ath_setup_channels(sc);
1176 if (error)
1177 goto bad;
1178
1179 /* default to MONITOR mode until an interface is attached */
1180 sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;
1181
1182 /* Setup rate tables */
1183
1184 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1185 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1186
1187 /* NB: setup here so ath_rate_update is happy */
1188 ath_setcurmode(sc, ATH9K_MODE_11A);
1189
1190 /*
1191 * Allocate hardware transmit queues: one queue for
1192 * beacon frames and one data queue for each QoS
1193 * priority. Note that the hal handles resetting
1194 * these queues at the needed time.
1195 */
1196 sc->sc_bhalq = ath_beaconq_setup(ah);
1197 if (sc->sc_bhalq == -1) {
1198 DPRINTF(sc, ATH_DBG_FATAL,
1199 "%s: unable to setup a beacon xmit queue\n", __func__);
1200 error = -EIO;
1201 goto bad2;
1202 }
1203 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1204 if (sc->sc_cabq == NULL) {
1205 DPRINTF(sc, ATH_DBG_FATAL,
1206 "%s: unable to setup CAB xmit queue\n", __func__);
1207 error = -EIO;
1208 goto bad2;
1209 }
1210
1211 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1212 ath_cabq_update(sc);
1213
1214 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1215 sc->sc_haltype2q[i] = -1;
1216
1217 /* Setup data queues */
1218 /* NB: ensure BK queue is the lowest priority h/w queue */
1219 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1220 DPRINTF(sc, ATH_DBG_FATAL,
1221 "%s: unable to setup xmit queue for BK traffic\n",
1222 __func__);
1223 error = -EIO;
1224 goto bad2;
1225 }
1226
1227 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1228 DPRINTF(sc, ATH_DBG_FATAL,
1229 "%s: unable to setup xmit queue for BE traffic\n",
1230 __func__);
1231 error = -EIO;
1232 goto bad2;
1233 }
1234 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1235 DPRINTF(sc, ATH_DBG_FATAL,
1236 "%s: unable to setup xmit queue for VI traffic\n",
1237 __func__);
1238 error = -EIO;
1239 goto bad2;
1240 }
1241 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1242 DPRINTF(sc, ATH_DBG_FATAL,
1243 "%s: unable to setup xmit queue for VO traffic\n",
1244 __func__);
1245 error = -EIO;
1246 goto bad2;
1247 }
1248
1249 setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);
1250
1251 sc->sc_rc = ath_rate_attach(ah);
1252 if (sc->sc_rc == NULL) {
1253 error = -EIO;
1254 goto bad2;
1255 }
1256
1257 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1258 ATH9K_CIPHER_TKIP, NULL)) {
1259 /*
1260 * Whether we should enable h/w TKIP MIC.
1261 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1262 * report WMM capable, so it's always safe to turn on
1263 * TKIP MIC in this case.
1264 */
1265 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1266 0, 1, NULL);
1267 }
1268
1269 /*
1270 * Check whether the separate key cache entries
1271 * are required to handle both tx+rx MIC keys.
1272 * With split mic keys the number of stations is limited
1273 * to 27, otherwise 59.
1274 */
1275 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1276 ATH9K_CIPHER_TKIP, NULL)
1277 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1278 ATH9K_CIPHER_MIC, NULL)
1279 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1280 0, NULL))
1281 sc->sc_splitmic = 1;
1282
1283 /* turn on mcast key search if possible */
1284 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1285 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1286 1, NULL);
1287
1288 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1289 sc->sc_config.txpowlimit_override = 0;
1290
1291 /* 11n Capabilities */
1292 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1293 sc->sc_flags |= SC_OP_TXAGGR;
1294 sc->sc_flags |= SC_OP_RXAGGR;
1295 }
1296
1297 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1298 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1299
1300 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1301 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1302
1303 ath9k_hw_getmac(ah, sc->sc_myaddr);
1304 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1305 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1306 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1307 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1308 }
1309 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1310
1311 /* initialize beacon slots */
1312 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1313 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1314
1315 /* save MISC configurations */
1316 sc->sc_config.swBeaconProcess = 1;
1317
1318 #ifdef CONFIG_SLOW_ANT_DIV
1319 /* range is 40 - 255, we use something in the middle */
1320 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1321 #endif
1322
1323 return 0;
1324 bad2:
1325 /* cleanup tx queues */
1326 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1327 if (ATH_TXQ_SETUP(sc, i))
1328 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1329 bad:
1330 if (ah)
1331 ath9k_hw_detach(ah);
1332 return error;
1333 }
1334
1335 void ath_deinit(struct ath_softc *sc)
1336 {
1337 struct ath_hal *ah = sc->sc_ah;
1338 int i;
1339
1340 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1341
1342 tasklet_kill(&sc->intr_tq);
1343 tasklet_kill(&sc->bcon_tasklet);
1344 ath_stop(sc);
1345 if (!(sc->sc_flags & SC_OP_INVALID))
1346 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1347 ath_rate_detach(sc->sc_rc);
1348 /* cleanup tx queues */
1349 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1350 if (ATH_TXQ_SETUP(sc, i))
1351 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1352 ath9k_hw_detach(ah);
1353 }
1354
1355 /*******************/
1356 /* Node Management */
1357 /*******************/
1358
1359 void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, int if_id)
1360 {
1361 struct ath_vap *avp;
1362 struct ath_node *an;
1363
1364 avp = sc->sc_vaps[if_id];
1365 ASSERT(avp != NULL);
1366
1367 an = (struct ath_node *)sta->drv_priv;
1368
1369 /* set up per-node tx/rx state */
1370 ath_tx_node_init(sc, an);
1371 ath_rx_node_init(sc, an);
1372
1373 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
1374 sta->ht_cap.ampdu_factor);
1375 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
1376
1377 ath_chainmask_sel_init(sc, an);
1378 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1379 }
1380
1381 void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
1382 {
1383 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1384
1385 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1386
1387 ath_tx_node_cleanup(sc, an);
1388
1389 ath_tx_node_free(sc, an);
1390 ath_rx_node_free(sc, an);
1391 }
1392
1393 /*
1394 * Set up New Node
1395 *
1396 * Setup driver-specific state for a newly associated node. This routine
1397 * really only applies if compression or XR are enabled; there is no code
1398 * covering any other cases.
1399 */
1400
1401 void ath_newassoc(struct ath_softc *sc,
1402 struct ath_node *an, int isnew, int isuapsd)
1403 {
1404 int tidno;
1405
1406 /* if station reassociates, tear down the aggregation state. */
1407 if (!isnew) {
1408 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1409 if (sc->sc_flags & SC_OP_TXAGGR)
1410 ath_tx_aggr_teardown(sc, an, tidno);
1411 if (sc->sc_flags & SC_OP_RXAGGR)
1412 ath_rx_aggr_teardown(sc, an, tidno);
1413 }
1414 }
1415 }
1416
1417 /**************/
1418 /* Encryption */
1419 /**************/
1420
1421 void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1422 {
1423 ath9k_hw_keyreset(sc->sc_ah, keyix);
1424 if (freeslot)
1425 clear_bit(keyix, sc->sc_keymap);
1426 }
1427
1428 int ath_keyset(struct ath_softc *sc,
1429 u16 keyix,
1430 struct ath9k_keyval *hk,
1431 const u8 mac[ETH_ALEN])
1432 {
1433 bool status;
1434
1435 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1436 keyix, hk, mac, false);
1437
1438 return status != false;
1439 }
1440
1441 /***********************/
1442 /* TX Power/Regulatory */
1443 /***********************/
1444
1445 /*
1446 * Set Transmit power in HAL
1447 *
1448 * This routine makes the actual HAL calls to set the new transmit power
1449 * limit.
1450 */
1451
1452 void ath_update_txpow(struct ath_softc *sc)
1453 {
1454 struct ath_hal *ah = sc->sc_ah;
1455 u32 txpow;
1456
1457 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1458 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1459 /* read back in case value is clamped */
1460 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1461 sc->sc_curtxpow = txpow;
1462 }
1463 }
1464
1465 /* Return the current country and domain information */
1466 void ath_get_currentCountry(struct ath_softc *sc,
1467 struct ath9k_country_entry *ctry)
1468 {
1469 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1470
1471 /* If the HAL has not determined the country yet (it is band
1472 * dependent), use the one we passed in. */
1473 if (ctry->countryCode == CTRY_DEFAULT) {
1474 ctry->iso[0] = 0;
1475 ctry->iso[1] = 0;
1476 } else if (ctry->iso[0] && ctry->iso[1]) {
1477 if (!ctry->iso[2]) {
1478 if (ath_outdoor)
1479 ctry->iso[2] = 'O';
1480 else
1481 ctry->iso[2] = 'I';
1482 }
1483 }
1484 }
1485
1486 /**************************/
1487 /* Slow Antenna Diversity */
1488 /**************************/
1489
1490 void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1491 struct ath_softc *sc,
1492 int32_t rssitrig)
1493 {
1494 int trig;
1495
1496 /* antdivf_rssitrig can range from 40 - 0xff */
1497 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1498 trig = (trig < 40) ? 40 : trig;
1499
1500 antdiv->antdiv_sc = sc;
1501 antdiv->antdivf_rssitrig = trig;
1502 }
1503
1504 void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1505 u8 num_antcfg,
1506 const u8 *bssid)
1507 {
1508 antdiv->antdiv_num_antcfg =
1509 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1510 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1511 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1512 antdiv->antdiv_curcfg = 0;
1513 antdiv->antdiv_bestcfg = 0;
1514 antdiv->antdiv_laststatetsf = 0;
1515
1516 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1517
1518 antdiv->antdiv_start = 1;
1519 }
1520
1521 void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1522 {
1523 antdiv->antdiv_start = 0;
1524 }
1525
1526 static int32_t ath_find_max_val(int32_t *val,
1527 u8 num_val, u8 *max_index)
1528 {
1529 int32_t MaxVal = *val++;
1530 u32 cur_index = 0;
1531
1532 *max_index = 0;
1533 while (++cur_index < num_val) {
1534 if (*val > MaxVal) {
1535 MaxVal = *val;
1536 *max_index = cur_index;
1537 }
1538
1539 val++;
1540 }
1541
1542 return MaxVal;
1543 }
1544
1545 void ath_slow_ant_div(struct ath_antdiv *antdiv,
1546 struct ieee80211_hdr *hdr,
1547 struct ath_rx_status *rx_stats)
1548 {
1549 struct ath_softc *sc = antdiv->antdiv_sc;
1550 struct ath_hal *ah = sc->sc_ah;
1551 u64 curtsf = 0;
1552 u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
1553 __le16 fc = hdr->frame_control;
1554
1555 if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
1556 && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
1557 antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
1558 antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
1559 curtsf = antdiv->antdiv_lastbtsf[curcfg];
1560 } else {
1561 return;
1562 }
1563
1564 switch (antdiv->antdiv_state) {
1565 case ATH_ANT_DIV_IDLE:
1566 if ((antdiv->antdiv_lastbrssi[curcfg] <
1567 antdiv->antdivf_rssitrig)
1568 && ((curtsf - antdiv->antdiv_laststatetsf) >
1569 ATH_ANT_DIV_MIN_IDLE_US)) {
1570
1571 curcfg++;
1572 if (curcfg == antdiv->antdiv_num_antcfg)
1573 curcfg = 0;
1574
1575 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1576 antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
1577 antdiv->antdiv_curcfg = curcfg;
1578 antdiv->antdiv_laststatetsf = curtsf;
1579 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1580 }
1581 }
1582 break;
1583
1584 case ATH_ANT_DIV_SCAN:
1585 if ((curtsf - antdiv->antdiv_laststatetsf) <
1586 ATH_ANT_DIV_MIN_SCAN_US)
1587 break;
1588
1589 curcfg++;
1590 if (curcfg == antdiv->antdiv_num_antcfg)
1591 curcfg = 0;
1592
1593 if (curcfg == antdiv->antdiv_bestcfg) {
1594 ath_find_max_val(antdiv->antdiv_lastbrssi,
1595 antdiv->antdiv_num_antcfg, &bestcfg);
1596 if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
1597 antdiv->antdiv_bestcfg = bestcfg;
1598 antdiv->antdiv_curcfg = bestcfg;
1599 antdiv->antdiv_laststatetsf = curtsf;
1600 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1601 }
1602 } else {
1603 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1604 antdiv->antdiv_curcfg = curcfg;
1605 antdiv->antdiv_laststatetsf = curtsf;
1606 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1607 }
1608 }
1609
1610 break;
1611 }
1612 }
1613
1614 /***********************/
1615 /* Descriptor Handling */
1616 /***********************/
1617
1618 /*
1619 * Set up DMA descriptors
1620 *
1621 * This function will allocate both the DMA descriptor structure, and the
1622 * buffers it contains. These are used to contain the descriptors used
1623 * by the system.
1624 */
1625
1626 int ath_descdma_setup(struct ath_softc *sc,
1627 struct ath_descdma *dd,
1628 struct list_head *head,
1629 const char *name,
1630 int nbuf,
1631 int ndesc)
1632 {
1633 #define DS2PHYS(_dd, _ds) \
1634 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1635 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1636 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1637
1638 struct ath_desc *ds;
1639 struct ath_buf *bf;
1640 int i, bsize, error;
1641
1642 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1643 __func__, name, nbuf, ndesc);
1644
1645 /* ath_desc must be a multiple of DWORDs */
1646 if ((sizeof(struct ath_desc) % 4) != 0) {
1647 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1648 __func__);
1649 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1650 error = -ENOMEM;
1651 goto fail;
1652 }
1653
1654 dd->dd_name = name;
1655 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1656
1657 /*
1658 * Need additional DMA memory because we can't use
1659 * descriptors that cross the 4K page boundary. Assume
1660 * one skipped descriptor per 4K page.
1661 */
1662 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1663 u32 ndesc_skipped =
1664 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1665 u32 dma_len;
1666
1667 while (ndesc_skipped) {
1668 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1669 dd->dd_desc_len += dma_len;
1670
1671 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1672 }
1673 }
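/*
 * Worked example (illustrative; assumes a hypothetical 32-byte
 * struct ath_desc): for nbuf * ndesc == 512 the base allocation is
 * 512 * 32 == 16384 bytes, which spans four 4K pages. The first loop
 * pass reserves 4 spare descriptors (128 bytes); the second pass
 * computes 128 / 4096 == 0, so the loop stops with
 * dd_desc_len == 16512.
 */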
1674
1675 /* allocate descriptors */
1676 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1677 dd->dd_desc_len,
1678 &dd->dd_desc_paddr);
1679 if (dd->dd_desc == NULL) {
1680 error = -ENOMEM;
1681 goto fail;
1682 }
1683 ds = dd->dd_desc;
1684 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1685 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1686 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1687
1688 /* allocate buffers */
1689 bsize = sizeof(struct ath_buf) * nbuf;
1690 bf = kmalloc(bsize, GFP_KERNEL);
1691 if (bf == NULL) {
1692 error = -ENOMEM;
1693 goto fail2;
1694 }
1695 memset(bf, 0, bsize);
1696 dd->dd_bufptr = bf;
1697
1698 INIT_LIST_HEAD(head);
1699 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1700 bf->bf_desc = ds;
1701 bf->bf_daddr = DS2PHYS(dd, ds);
1702
1703 if (!(sc->sc_ah->ah_caps.hw_caps &
1704 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1705 /*
1706 * Skip descriptor addresses which can cause 4KB
1707 * boundary crossing (addr + length) with a 32 dword
1708 * descriptor fetch.
1709 */
1710 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1711 ASSERT((caddr_t) bf->bf_desc <
1712 ((caddr_t) dd->dd_desc +
1713 dd->dd_desc_len));
1714
1715 ds += ndesc;
1716 bf->bf_desc = ds;
1717 bf->bf_daddr = DS2PHYS(dd, ds);
1718 }
1719 }
1720 list_add_tail(&bf->list, head);
1721 }
1722 return 0;
1723 fail2:
1724 pci_free_consistent(sc->pdev,
1725 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1726 fail:
1727 memset(dd, 0, sizeof(*dd));
1728 return error;
1729 #undef ATH_DESC_4KB_BOUND_CHECK
1730 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1731 #undef DS2PHYS
1732 }
1733
1734 /*
1735 * Cleanup DMA descriptors
1736 *
1737 * This function will free the DMA block that was allocated for the descriptor
1738 * pool. Since this was allocated as one "chunk", it is freed in the same
1739 * manner.
1740 */
1741
1742 void ath_descdma_cleanup(struct ath_softc *sc,
1743 struct ath_descdma *dd,
1744 struct list_head *head)
1745 {
1746 /* Free memory associated with descriptors */
1747 pci_free_consistent(sc->pdev,
1748 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1749
1750 INIT_LIST_HEAD(head);
1751 kfree(dd->dd_bufptr);
1752 memset(dd, 0, sizeof(*dd));
1753 }
1754
1755 /*************/
1756 /* Utilities */
1757 /*************/
1758
1759 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1760 {
1761 int qnum;
1762
1763 switch (queue) {
1764 case 0:
1765 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1766 break;
1767 case 1:
1768 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1769 break;
1770 case 2:
1771 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1772 break;
1773 case 3:
1774 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1775 break;
1776 default:
1777 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1778 break;
1779 }
1780
1781 return qnum;
1782 }
1783
1784 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1785 {
1786 int qnum;
1787
1788 switch (queue) {
1789 case ATH9K_WME_AC_VO:
1790 qnum = 0;
1791 break;
1792 case ATH9K_WME_AC_VI:
1793 qnum = 1;
1794 break;
1795 case ATH9K_WME_AC_BE:
1796 qnum = 2;
1797 break;
1798 case ATH9K_WME_AC_BK:
1799 qnum = 3;
1800 break;
1801 default:
1802 qnum = -1;
1803 break;
1804 }
1805
1806 return qnum;
1807 }
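/*
 * Mapping summary (illustrative): mac80211 queues 0/1/2/3 correspond
 * to the WME ACs VO/VI/BE/BK. ath_get_hal_qnum() resolves the same
 * ACs to h/w queue numbers via sc_haltype2q, which is assumed to be
 * filled in by ath_tx_setup() during ath_init(). Unknown values fall
 * back to BE in one direction and to -1 in the other.
 */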
1808
1809
1810 /*
1811 * Expand time stamp to TSF
1812 *
1813 * Extend 15-bit time stamp from rx descriptor to
1814 * a full 64-bit TSF using the current h/w TSF.
1815 */
1816
1817 u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1818 {
1819 u64 tsf;
1820
1821 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1822 if ((tsf & 0x7fff) < rstamp)
1823 tsf -= 0x8000;
1824 return (tsf & ~0x7fff) | rstamp;
1825 }
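/*
 * Worked example (illustrative): if ath9k_hw_gettsf64() returns
 * 0x2340812a and the descriptor latched rstamp == 0x7f00, the low
 * 15 TSF bits (0x012a) are below rstamp, so the 15-bit counter
 * wrapped after the frame was stamped. Stepping back one period
 * (0x2340812a - 0x8000 == 0x2340012a) and splicing in rstamp gives
 * (0x2340012a & ~0x7fff) | 0x7f00 == 0x23407f00, i.e. 0x22a us
 * before the current TSF.
 */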
1826
1827 /*
1828 * Set Default Antenna
1829 *
1830 * Call into the HAL to set the default antenna to use. Not really valid for
1831 * MIMO technology.
1832 */
1833
1834 void ath_setdefantenna(void *context, u32 antenna)
1835 {
1836 struct ath_softc *sc = (struct ath_softc *)context;
1837 struct ath_hal *ah = sc->sc_ah;
1838
1839 /* XXX block beacon interrupts */
1840 ath9k_hw_setantenna(ah, antenna);
1841 sc->sc_defant = antenna;
1842 sc->sc_rxotherant = 0;
1843 }
1844
1845 /*
1846 * Set Slot Time
1847 *
1848 * This will wake up the chip if required, and set the slot time for the
1849 * frame (maximum transmit time). Slot time is assumed to be already set
1850 * in the ATH object member sc_slottime
1851 */
1852
1853 void ath_setslottime(struct ath_softc *sc)
1854 {
1855 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
1856 sc->sc_updateslot = OK;
1857 }