ath9k_hw: add initvals for the AR9003 hardware family
[deliverable/linux.git] / drivers / net / wireless / ath / ath9k / hw.c
1 /*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/io.h>
18 #include <asm/unaligned.h>
19
20 #include "hw.h"
21 #include "hw-ops.h"
22 #include "rc.h"
23 #include "ar5008_initvals.h"
24 #include "ar9001_initvals.h"
25 #include "ar9002_initvals.h"
26 #include "ar9003_initvals.h"
27
28 #define ATH9K_CLOCK_RATE_CCK 22
29 #define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
30 #define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
31
32 static void ar9002_hw_attach_ops(struct ath_hw *ah);
33 static void ar9003_hw_attach_ops(struct ath_hw *ah);
34
35 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
36
37 MODULE_AUTHOR("Atheros Communications");
38 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
39 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
40 MODULE_LICENSE("Dual BSD/GPL");
41
42 static int __init ath9k_init(void)
43 {
44 return 0;
45 }
46 module_init(ath9k_init);
47
48 static void __exit ath9k_exit(void)
49 {
50 return;
51 }
52 module_exit(ath9k_exit);
53
54 /* Private hardware callbacks */
55
56 static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
57 {
58 ath9k_hw_private_ops(ah)->init_cal_settings(ah);
59 }
60
61 static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
62 {
63 ath9k_hw_private_ops(ah)->init_mode_regs(ah);
64 }
65
66 static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
67 {
68 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
69
70 return priv_ops->macversion_supported(ah->hw_version.macVersion);
71 }
72
73 static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
74 struct ath9k_channel *chan)
75 {
76 return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
77 }
78
79 /********************/
80 /* Helper Functions */
81 /********************/
82
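/*
 * Convert a duration in microseconds to MAC clock cycles using the
 * per-band clock rates defined above; ath9k_hw_mac_to_clks() below
 * doubles the result on HT40 channels.
 */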
83 static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
84 {
85 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
86
87 if (!ah->curchan) /* should really check for CCK instead */
88 return usecs * ATH9K_CLOCK_RATE_CCK;
89 if (conf->channel->band == IEEE80211_BAND_2GHZ)
90 return usecs * ATH9K_CLOCK_RATE_2GHZ_OFDM;
91 return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
92 }
93
94 static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
95 {
96 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
97
98 if (conf_is_ht40(conf))
99 return ath9k_hw_mac_clks(ah, usecs) * 2;
100 else
101 return ath9k_hw_mac_clks(ah, usecs);
102 }
103
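/*
 * Poll a register until (REG_READ & mask) == val or the timeout (in us)
 * expires, sampling every AH_TIME_QUANTUM microseconds.
 */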
104 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
105 {
106 int i;
107
108 BUG_ON(timeout < AH_TIME_QUANTUM);
109
110 for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) {
111 if ((REG_READ(ah, reg) & mask) == val)
112 return true;
113
114 udelay(AH_TIME_QUANTUM);
115 }
116
117 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
118 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
119 timeout, reg, REG_READ(ah, reg), mask, val);
120
121 return false;
122 }
123 EXPORT_SYMBOL(ath9k_hw_wait);
124
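/* Reverse the order of the lowest n bits of val */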
125 u32 ath9k_hw_reverse_bits(u32 val, u32 n)
126 {
127 u32 retval;
128 int i;
129
130 for (i = 0, retval = 0; i < n; i++) {
131 retval = (retval << 1) | (val & 1);
132 val >>= 1;
133 }
134 return retval;
135 }
136
137 bool ath9k_get_channel_edges(struct ath_hw *ah,
138 u16 flags, u16 *low,
139 u16 *high)
140 {
141 struct ath9k_hw_capabilities *pCap = &ah->caps;
142
143 if (flags & CHANNEL_5GHZ) {
144 *low = pCap->low_5ghz_chan;
145 *high = pCap->high_5ghz_chan;
146 return true;
147 }
148 if ((flags & CHANNEL_2GHZ)) {
149 *low = pCap->low_2ghz_chan;
150 *high = pCap->high_2ghz_chan;
151 return true;
152 }
153 return false;
154 }
155
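/*
 * Compute the on-air duration (in us) of a frameLen-byte frame sent at
 * the given rate in kbps, including preamble, PLCP and SIFS overhead,
 * with separate handling for CCK and full/half/quarter-rate OFDM.
 */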
156 u16 ath9k_hw_computetxtime(struct ath_hw *ah,
157 u8 phy, int kbps,
158 u32 frameLen, u16 rateix,
159 bool shortPreamble)
160 {
161 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
162
163 if (kbps == 0)
164 return 0;
165
166 switch (phy) {
167 case WLAN_RC_PHY_CCK:
168 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
169 if (shortPreamble)
170 phyTime >>= 1;
171 numBits = frameLen << 3;
172 txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
173 break;
174 case WLAN_RC_PHY_OFDM:
175 if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) {
176 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
177 numBits = OFDM_PLCP_BITS + (frameLen << 3);
178 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
179 txTime = OFDM_SIFS_TIME_QUARTER
180 + OFDM_PREAMBLE_TIME_QUARTER
181 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
182 } else if (ah->curchan &&
183 IS_CHAN_HALF_RATE(ah->curchan)) {
184 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
185 numBits = OFDM_PLCP_BITS + (frameLen << 3);
186 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
187 txTime = OFDM_SIFS_TIME_HALF +
188 OFDM_PREAMBLE_TIME_HALF
189 + (numSymbols * OFDM_SYMBOL_TIME_HALF);
190 } else {
191 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
192 numBits = OFDM_PLCP_BITS + (frameLen << 3);
193 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
194 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
195 + (numSymbols * OFDM_SYMBOL_TIME);
196 }
197 break;
198 default:
199 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
200 "Unknown phy %u (rate ix %u)\n", phy, rateix);
201 txTime = 0;
202 break;
203 }
204
205 return txTime;
206 }
207 EXPORT_SYMBOL(ath9k_hw_computetxtime);
208
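/*
 * Work out the synthesizer, control and extension channel centers for a
 * channel; on HT40 the synthesizer is offset by HT40_CHANNEL_CENTER_SHIFT
 * above (HT40+) or below (HT40-) the control channel.
 */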
209 void ath9k_hw_get_channel_centers(struct ath_hw *ah,
210 struct ath9k_channel *chan,
211 struct chan_centers *centers)
212 {
213 int8_t extoff;
214
215 if (!IS_CHAN_HT40(chan)) {
216 centers->ctl_center = centers->ext_center =
217 centers->synth_center = chan->channel;
218 return;
219 }
220
221 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
222 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
223 centers->synth_center =
224 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
225 extoff = 1;
226 } else {
227 centers->synth_center =
228 chan->channel - HT40_CHANNEL_CENTER_SHIFT;
229 extoff = -1;
230 }
231
232 centers->ctl_center =
233 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
234 /* 25 MHz spacing is supported by hw but not by upper layers */
235 centers->ext_center =
236 centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
237 }
238
239 /******************/
240 /* Chip Revisions */
241 /******************/
242
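/* Decode the MAC version/revision and PCI vs. PCI-E host mode from AR_SREV */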
243 static void ath9k_hw_read_revisions(struct ath_hw *ah)
244 {
245 u32 val;
246
247 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
248
249 if (val == 0xFF) {
250 val = REG_READ(ah, AR_SREV);
251 ah->hw_version.macVersion =
252 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
253 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
254 ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
255 } else {
256 if (!AR_SREV_9100(ah))
257 ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);
258
259 ah->hw_version.macRev = val & AR_SREV_REVISION;
260
261 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
262 ah->is_pciexpress = true;
263 }
264 }
265
266 static int ath9k_hw_get_radiorev(struct ath_hw *ah)
267 {
268 u32 val;
269 int i;
270
271 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
272
273 for (i = 0; i < 8; i++)
274 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
275 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
276 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
277
278 return ath9k_hw_reverse_bits(val, 8);
279 }
280
281 /************************************/
282 /* HW Attach, Detach, Init Routines */
283 /************************************/
284
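/*
 * Program a fixed PCIe SERDES configuration; used from __ath9k_hw_init()
 * when the device is not operating as PCI Express.
 */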
285 static void ath9k_hw_disablepcie(struct ath_hw *ah)
286 {
287 if (AR_SREV_9100(ah))
288 return;
289
290 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
291 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
292 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
293 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
294 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
295 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
296 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
297 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
298 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
299
300 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
301 }
302
303 /* This should work for all families including legacy */
304 static bool ath9k_hw_chip_test(struct ath_hw *ah)
305 {
306 struct ath_common *common = ath9k_hw_common(ah);
307 u32 regAddr[2] = { AR_STA_ID0 };
308 u32 regHold[2];
309 u32 patternData[4] = { 0x55555555,
310 0xaaaaaaaa,
311 0x66666666,
312 0x99999999 };
313 int i, j, loop_max;
314
315 if (!AR_SREV_9300_20_OR_LATER(ah)) {
316 loop_max = 2;
317 regAddr[1] = AR_PHY_BASE + (8 << 2);
318 } else
319 loop_max = 1;
320
321 for (i = 0; i < loop_max; i++) {
322 u32 addr = regAddr[i];
323 u32 wrData, rdData;
324
325 regHold[i] = REG_READ(ah, addr);
326 for (j = 0; j < 0x100; j++) {
327 wrData = (j << 16) | j;
328 REG_WRITE(ah, addr, wrData);
329 rdData = REG_READ(ah, addr);
330 if (rdData != wrData) {
331 ath_print(common, ATH_DBG_FATAL,
332 "address test failed "
333 "addr: 0x%08x - wr:0x%08x != "
334 "rd:0x%08x\n",
335 addr, wrData, rdData);
336 return false;
337 }
338 }
339 for (j = 0; j < 4; j++) {
340 wrData = patternData[j];
341 REG_WRITE(ah, addr, wrData);
342 rdData = REG_READ(ah, addr);
343 if (wrData != rdData) {
344 ath_print(common, ATH_DBG_FATAL,
345 "address test failed "
346 "addr: 0x%08x - wr:0x%08x != "
347 "rd:0x%08x\n",
348 addr, wrData, rdData);
349 return false;
350 }
351 }
352 REG_WRITE(ah, regAddr[i], regHold[i]);
353 }
354 udelay(100);
355
356 return true;
357 }
358
359 static void ath9k_hw_init_config(struct ath_hw *ah)
360 {
361 int i;
362
363 ah->config.dma_beacon_response_time = 2;
364 ah->config.sw_beacon_response_time = 10;
365 ah->config.additional_swba_backoff = 0;
366 ah->config.ack_6mb = 0x0;
367 ah->config.cwm_ignore_extcca = 0;
368 ah->config.pcie_powersave_enable = 0;
369 ah->config.pcie_clock_req = 0;
370 ah->config.pcie_waen = 0;
371 ah->config.analog_shiftreg = 1;
372 ah->config.ofdm_trig_low = 200;
373 ah->config.ofdm_trig_high = 500;
374 ah->config.cck_trig_high = 200;
375 ah->config.cck_trig_low = 100;
376
377 /*
378 * For now ANI is disabled for AR9003; it is still
379 * being tested.
380 */
381 if (!AR_SREV_9300_20_OR_LATER(ah))
382 ah->config.enable_ani = 1;
383
384 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
385 ah->config.spurchans[i][0] = AR_NO_SPUR;
386 ah->config.spurchans[i][1] = AR_NO_SPUR;
387 }
388
389 if (ah->hw_version.devid != AR2427_DEVID_PCIE)
390 ah->config.ht_enable = 1;
391 else
392 ah->config.ht_enable = 0;
393
394 ah->config.rx_intr_mitigation = true;
395
396 /*
397 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
398 * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
399 * This means we use it for all AR5416 devices, and the few
400 * minor PCI AR9280 devices out there.
401 *
402 * Serialization is required because these devices do not handle
403 * well the case of two concurrent reads/writes due to the latency
404 * involved. During one read/write another read/write can be issued
405 * on another CPU while the previous read/write may still be working
406 * on our hardware; if we hit this case, the hardware hangs in a loop.
407 * We prevent this by serializing reads and writes.
408 *
409 * This issue is not present on PCI-Express devices or pre-AR5416
410 * devices (legacy, 802.11abg).
411 */
412 if (num_possible_cpus() > 1)
413 ah->config.serialize_regmode = SER_REG_MODE_AUTO;
414 }
415
416 static void ath9k_hw_init_defaults(struct ath_hw *ah)
417 {
418 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
419
420 regulatory->country_code = CTRY_DEFAULT;
421 regulatory->power_limit = MAX_RATE_POWER;
422 regulatory->tp_scale = ATH9K_TP_SCALE_MAX;
423
424 ah->hw_version.magic = AR5416_MAGIC;
425 ah->hw_version.subvendorid = 0;
426
427 ah->ah_flags = 0;
428 if (!AR_SREV_9100(ah))
429 ah->ah_flags = AH_USE_EEPROM;
430
431 ah->atim_window = 0;
432 ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
433 ah->beacon_interval = 100;
434 ah->enable_32kHz_clock = DONT_USE_32KHZ;
435 ah->slottime = (u32) -1;
436 ah->globaltxtimeout = (u32) -1;
437 ah->power_mode = ATH9K_PM_UNDEFINED;
438 }
439
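/* Read the analog radio revision and reject chips with an unsupported radio */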
440 static int ath9k_hw_rf_claim(struct ath_hw *ah)
441 {
442 u32 val;
443
444 REG_WRITE(ah, AR_PHY(0), 0x00000007);
445
446 val = ath9k_hw_get_radiorev(ah);
447 switch (val & AR_RADIO_SREV_MAJOR) {
448 case 0:
449 val = AR_RAD5133_SREV_MAJOR;
450 break;
451 case AR_RAD5133_SREV_MAJOR:
452 case AR_RAD5122_SREV_MAJOR:
453 case AR_RAD2133_SREV_MAJOR:
454 case AR_RAD2122_SREV_MAJOR:
455 break;
456 default:
457 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
458 "Radio Chip Rev 0x%02X not supported\n",
459 val & AR_RADIO_SREV_MAJOR);
460 return -EOPNOTSUPP;
461 }
462
463 ah->hw_version.analog5GhzRev = val;
464
465 return 0;
466 }
467
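/*
 * Build the MAC address from the three 16-bit EEPROM words; an all-zero
 * or all-ones address is rejected with -EADDRNOTAVAIL.
 */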
468 static int ath9k_hw_init_macaddr(struct ath_hw *ah)
469 {
470 struct ath_common *common = ath9k_hw_common(ah);
471 u32 sum;
472 int i;
473 u16 eeval;
474
475 sum = 0;
476 for (i = 0; i < 3; i++) {
477 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
478 sum += eeval;
479 common->macaddr[2 * i] = eeval >> 8;
480 common->macaddr[2 * i + 1] = eeval & 0xff;
481 }
482 if (sum == 0 || sum == 0xffff * 3)
483 return -EADDRNOTAVAIL;
484
485 return 0;
486 }
487
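/*
 * Pick the AR9280 2.0 RX gain initval table based on the EEPROM rx gain
 * type (13 dB backoff, 23 dB backoff or original); older EEPROM minor
 * revisions fall back to the original table.
 */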
488 static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
489 {
490 u32 rxgain_type;
491
492 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
493 rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
494
495 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
496 INIT_INI_ARRAY(&ah->iniModesRxGain,
497 ar9280Modes_backoff_13db_rxgain_9280_2,
498 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
499 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
500 INIT_INI_ARRAY(&ah->iniModesRxGain,
501 ar9280Modes_backoff_23db_rxgain_9280_2,
502 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
503 else
504 INIT_INI_ARRAY(&ah->iniModesRxGain,
505 ar9280Modes_original_rxgain_9280_2,
506 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
507 } else {
508 INIT_INI_ARRAY(&ah->iniModesRxGain,
509 ar9280Modes_original_rxgain_9280_2,
510 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
511 }
512 }
513
514 static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
515 {
516 u32 txgain_type;
517
518 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
519 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
520
521 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
522 INIT_INI_ARRAY(&ah->iniModesTxGain,
523 ar9280Modes_high_power_tx_gain_9280_2,
524 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
525 else
526 INIT_INI_ARRAY(&ah->iniModesTxGain,
527 ar9280Modes_original_tx_gain_9280_2,
528 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
529 } else {
530 INIT_INI_ARRAY(&ah->iniModesTxGain,
531 ar9280Modes_original_tx_gain_9280_2,
532 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
533 }
534 }
535
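/*
 * Post-reset initialization: register self-test, radio claim, EEPROM
 * init, external radio bank allocation and ANI setup.
 */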
536 static int ath9k_hw_post_init(struct ath_hw *ah)
537 {
538 int ecode;
539
540 if (!AR_SREV_9271(ah)) {
541 if (!ath9k_hw_chip_test(ah))
542 return -ENODEV;
543 }
544
545 ecode = ath9k_hw_rf_claim(ah);
546 if (ecode != 0)
547 return ecode;
548
549 ecode = ath9k_hw_eeprom_init(ah);
550 if (ecode != 0)
551 return ecode;
552
553 ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG,
554 "Eeprom VER: %d, REV: %d\n",
555 ah->eep_ops->get_eeprom_ver(ah),
556 ah->eep_ops->get_eeprom_rev(ah));
557
558 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
559 if (ecode) {
560 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
561 "Failed allocating banks for "
562 "external radio\n");
563 return ecode;
564 }
565
566 if (!AR_SREV_9100(ah)) {
567 ath9k_hw_ani_setup(ah);
568 ath9k_hw_ani_init(ah);
569 }
570
571 return 0;
572 }
573
574 static bool ar9002_hw_macversion_supported(u32 macversion)
575 {
576 switch (macversion) {
577 case AR_SREV_VERSION_5416_PCI:
578 case AR_SREV_VERSION_5416_PCIE:
579 case AR_SREV_VERSION_9160:
580 case AR_SREV_VERSION_9100:
581 case AR_SREV_VERSION_9280:
582 case AR_SREV_VERSION_9285:
583 case AR_SREV_VERSION_9287:
584 case AR_SREV_VERSION_9271:
585 return true;
586 default:
587 break;
588 }
589 return false;
590 }
591
592 static bool ar9003_hw_macversion_supported(u32 macversion)
593 {
594 switch (macversion) {
595 case AR_SREV_VERSION_9300:
596 return true;
597 default:
598 break;
599 }
600 return false;
601 }
602
603 static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
604 {
605 if (AR_SREV_9160_10_OR_LATER(ah)) {
606 if (AR_SREV_9280_10_OR_LATER(ah)) {
607 ah->iq_caldata.calData = &iq_cal_single_sample;
608 ah->adcgain_caldata.calData =
609 &adc_gain_cal_single_sample;
610 ah->adcdc_caldata.calData =
611 &adc_dc_cal_single_sample;
612 ah->adcdc_calinitdata.calData =
613 &adc_init_dc_cal;
614 } else {
615 ah->iq_caldata.calData = &iq_cal_multi_sample;
616 ah->adcgain_caldata.calData =
617 &adc_gain_cal_multi_sample;
618 ah->adcdc_caldata.calData =
619 &adc_dc_cal_multi_sample;
620 ah->adcdc_calinitdata.calData =
621 &adc_init_dc_cal;
622 }
623 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
624 }
625 }
626
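/*
 * Select the INI tables (modes, common, RF banks, ADDAC, PCIe SERDES)
 * for the AR5008/AR9001/AR9002 family based on the detected MAC revision.
 */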
627 static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
628 {
629 if (AR_SREV_9271(ah)) {
630 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
631 ARRAY_SIZE(ar9271Modes_9271), 6);
632 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
633 ARRAY_SIZE(ar9271Common_9271), 2);
634 INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
635 ar9271Common_normal_cck_fir_coeff_9271,
636 ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
637 INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
638 ar9271Common_japan_2484_cck_fir_coeff_9271,
639 ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
640 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
641 ar9271Modes_9271_1_0_only,
642 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
643 INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
644 ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
645 INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
646 ar9271Modes_high_power_tx_gain_9271,
647 ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
648 INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
649 ar9271Modes_normal_power_tx_gain_9271,
650 ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
651 return;
652 }
653
654 if (AR_SREV_9287_11_OR_LATER(ah)) {
655 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
656 ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
657 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
658 ARRAY_SIZE(ar9287Common_9287_1_1), 2);
659 if (ah->config.pcie_clock_req)
660 INIT_INI_ARRAY(&ah->iniPcieSerdes,
661 ar9287PciePhy_clkreq_off_L1_9287_1_1,
662 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
663 else
664 INIT_INI_ARRAY(&ah->iniPcieSerdes,
665 ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
666 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
667 2);
668 } else if (AR_SREV_9287_10_OR_LATER(ah)) {
669 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
670 ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
671 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
672 ARRAY_SIZE(ar9287Common_9287_1_0), 2);
673
674 if (ah->config.pcie_clock_req)
675 INIT_INI_ARRAY(&ah->iniPcieSerdes,
676 ar9287PciePhy_clkreq_off_L1_9287_1_0,
677 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
678 else
679 INIT_INI_ARRAY(&ah->iniPcieSerdes,
680 ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
681 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
682 2);
683 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
684
685
686 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
687 ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
688 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
689 ARRAY_SIZE(ar9285Common_9285_1_2), 2);
690
691 if (ah->config.pcie_clock_req) {
692 INIT_INI_ARRAY(&ah->iniPcieSerdes,
693 ar9285PciePhy_clkreq_off_L1_9285_1_2,
694 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
695 } else {
696 INIT_INI_ARRAY(&ah->iniPcieSerdes,
697 ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
698 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
699 2);
700 }
701 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
702 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
703 ARRAY_SIZE(ar9285Modes_9285), 6);
704 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
705 ARRAY_SIZE(ar9285Common_9285), 2);
706
707 if (ah->config.pcie_clock_req) {
708 INIT_INI_ARRAY(&ah->iniPcieSerdes,
709 ar9285PciePhy_clkreq_off_L1_9285,
710 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
711 } else {
712 INIT_INI_ARRAY(&ah->iniPcieSerdes,
713 ar9285PciePhy_clkreq_always_on_L1_9285,
714 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
715 }
716 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
717 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
718 ARRAY_SIZE(ar9280Modes_9280_2), 6);
719 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
720 ARRAY_SIZE(ar9280Common_9280_2), 2);
721
722 if (ah->config.pcie_clock_req) {
723 INIT_INI_ARRAY(&ah->iniPcieSerdes,
724 ar9280PciePhy_clkreq_off_L1_9280,
725 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
726 } else {
727 INIT_INI_ARRAY(&ah->iniPcieSerdes,
728 ar9280PciePhy_clkreq_always_on_L1_9280,
729 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
730 }
731 INIT_INI_ARRAY(&ah->iniModesAdditional,
732 ar9280Modes_fast_clock_9280_2,
733 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
734 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
735 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
736 ARRAY_SIZE(ar9280Modes_9280), 6);
737 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
738 ARRAY_SIZE(ar9280Common_9280), 2);
739 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
740 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
741 ARRAY_SIZE(ar5416Modes_9160), 6);
742 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
743 ARRAY_SIZE(ar5416Common_9160), 2);
744 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
745 ARRAY_SIZE(ar5416Bank0_9160), 2);
746 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
747 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
748 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
749 ARRAY_SIZE(ar5416Bank1_9160), 2);
750 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
751 ARRAY_SIZE(ar5416Bank2_9160), 2);
752 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
753 ARRAY_SIZE(ar5416Bank3_9160), 3);
754 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
755 ARRAY_SIZE(ar5416Bank6_9160), 3);
756 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
757 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
758 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
759 ARRAY_SIZE(ar5416Bank7_9160), 2);
760 if (AR_SREV_9160_11(ah)) {
761 INIT_INI_ARRAY(&ah->iniAddac,
762 ar5416Addac_91601_1,
763 ARRAY_SIZE(ar5416Addac_91601_1), 2);
764 } else {
765 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
766 ARRAY_SIZE(ar5416Addac_9160), 2);
767 }
768 } else if (AR_SREV_9100_OR_LATER(ah)) {
769 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
770 ARRAY_SIZE(ar5416Modes_9100), 6);
771 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
772 ARRAY_SIZE(ar5416Common_9100), 2);
773 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
774 ARRAY_SIZE(ar5416Bank0_9100), 2);
775 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
776 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
777 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
778 ARRAY_SIZE(ar5416Bank1_9100), 2);
779 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
780 ARRAY_SIZE(ar5416Bank2_9100), 2);
781 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
782 ARRAY_SIZE(ar5416Bank3_9100), 3);
783 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
784 ARRAY_SIZE(ar5416Bank6_9100), 3);
785 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
786 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
787 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
788 ARRAY_SIZE(ar5416Bank7_9100), 2);
789 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
790 ARRAY_SIZE(ar5416Addac_9100), 2);
791 } else {
792 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
793 ARRAY_SIZE(ar5416Modes), 6);
794 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
795 ARRAY_SIZE(ar5416Common), 2);
796 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
797 ARRAY_SIZE(ar5416Bank0), 2);
798 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
799 ARRAY_SIZE(ar5416BB_RfGain), 3);
800 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
801 ARRAY_SIZE(ar5416Bank1), 2);
802 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
803 ARRAY_SIZE(ar5416Bank2), 2);
804 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
805 ARRAY_SIZE(ar5416Bank3), 3);
806 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
807 ARRAY_SIZE(ar5416Bank6), 3);
808 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
809 ARRAY_SIZE(ar5416Bank6TPC), 3);
810 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
811 ARRAY_SIZE(ar5416Bank7), 2);
812 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
813 ARRAY_SIZE(ar5416Addac), 2);
814 }
815 }
816
817 /* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
818 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
819 {
820 /* mac */
821 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
822 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
823 ar9300_2p0_mac_core,
824 ARRAY_SIZE(ar9300_2p0_mac_core), 2);
825 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
826 ar9300_2p0_mac_postamble,
827 ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
828
829 /* bb */
830 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
831 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
832 ar9300_2p0_baseband_core,
833 ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
834 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
835 ar9300_2p0_baseband_postamble,
836 ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
837
838 /* radio */
839 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
840 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
841 ar9300_2p0_radio_core,
842 ARRAY_SIZE(ar9300_2p0_radio_core), 2);
843 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
844 ar9300_2p0_radio_postamble,
845 ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
846
847 /* soc */
848 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
849 ar9300_2p0_soc_preamble,
850 ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
851 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
852 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
853 ar9300_2p0_soc_postamble,
854 ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
855
856 /* rx/tx gain */
857 INIT_INI_ARRAY(&ah->iniModesRxGain,
858 ar9300Common_rx_gain_table_2p0,
859 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
860 INIT_INI_ARRAY(&ah->iniModesTxGain,
861 ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
862 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
863 5);
864
865 /* Load PCIE SERDES settings from INI */
866
867 /* Awake Setting */
868
869 INIT_INI_ARRAY(&ah->iniPcieSerdes,
870 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
871 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
872 2);
873
874 /* Sleep Setting */
875
876 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
877 ar9300PciePhy_clkreq_enable_L1_2p0,
878 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
879 2);
880
881 /* Fast clock modal settings */
882 INIT_INI_ARRAY(&ah->iniModesAdditional,
883 ar9300Modes_fast_clock_2p0,
884 ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
885 3);
886 }
887
888 static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
889 {
890 if (AR_SREV_9287_11_OR_LATER(ah))
891 INIT_INI_ARRAY(&ah->iniModesRxGain,
892 ar9287Modes_rx_gain_9287_1_1,
893 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
894 else if (AR_SREV_9287_10(ah))
895 INIT_INI_ARRAY(&ah->iniModesRxGain,
896 ar9287Modes_rx_gain_9287_1_0,
897 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
898 else if (AR_SREV_9280_20(ah))
899 ath9k_hw_init_rxgain_ini(ah);
900
901 if (AR_SREV_9287_11_OR_LATER(ah)) {
902 INIT_INI_ARRAY(&ah->iniModesTxGain,
903 ar9287Modes_tx_gain_9287_1_1,
904 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
905 } else if (AR_SREV_9287_10(ah)) {
906 INIT_INI_ARRAY(&ah->iniModesTxGain,
907 ar9287Modes_tx_gain_9287_1_0,
908 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
909 } else if (AR_SREV_9280_20(ah)) {
910 ath9k_hw_init_txgain_ini(ah);
911 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
912 u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
913
914 /* txgain table */
915 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
916 if (AR_SREV_9285E_20(ah)) {
917 INIT_INI_ARRAY(&ah->iniModesTxGain,
918 ar9285Modes_XE2_0_high_power,
919 ARRAY_SIZE(
920 ar9285Modes_XE2_0_high_power), 6);
921 } else {
922 INIT_INI_ARRAY(&ah->iniModesTxGain,
923 ar9285Modes_high_power_tx_gain_9285_1_2,
924 ARRAY_SIZE(
925 ar9285Modes_high_power_tx_gain_9285_1_2), 6);
926 }
927 } else {
928 if (AR_SREV_9285E_20(ah)) {
929 INIT_INI_ARRAY(&ah->iniModesTxGain,
930 ar9285Modes_XE2_0_normal_power,
931 ARRAY_SIZE(
932 ar9285Modes_XE2_0_normal_power), 6);
933 } else {
934 INIT_INI_ARRAY(&ah->iniModesTxGain,
935 ar9285Modes_original_tx_gain_9285_1_2,
936 ARRAY_SIZE(
937 ar9285Modes_original_tx_gain_9285_1_2), 6);
938 }
939 }
940 }
941 }
942
943 static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
944 {
945 struct base_eep_header *pBase = &(ah->eeprom.def.baseEepHeader);
946 struct ath_common *common = ath9k_hw_common(ah);
947
948 ah->need_an_top2_fixup = (ah->hw_version.devid == AR9280_DEVID_PCI) &&
949 (ah->eep_map != EEP_MAP_4KBITS) &&
950 ((pBase->version & 0xff) > 0x0a) &&
951 (pBase->pwdclkind == 0);
952
953 if (ah->need_an_top2_fixup)
954 ath_print(common, ATH_DBG_EEPROM,
955 "needs fixup for AR_AN_TOP2 register\n");
956 }
957
958 static void ath9k_hw_attach_ops(struct ath_hw *ah)
959 {
960 if (AR_SREV_9300_20_OR_LATER(ah))
961 ar9003_hw_attach_ops(ah);
962 else
963 ar9002_hw_attach_ops(ah);
964 }
965
966 /* Called for all hardware families */
967 static int __ath9k_hw_init(struct ath_hw *ah)
968 {
969 struct ath_common *common = ath9k_hw_common(ah);
970 int r = 0;
971
972 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
973 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
974
975 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
976 ath_print(common, ATH_DBG_FATAL,
977 "Couldn't reset chip\n");
978 return -EIO;
979 }
980
981 ath9k_hw_init_defaults(ah);
982 ath9k_hw_init_config(ah);
983
984 ath9k_hw_attach_ops(ah);
985
986 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
987 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
988 return -EIO;
989 }
990
991 if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
992 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
993 (AR_SREV_9280(ah) && !ah->is_pciexpress)) {
994 ah->config.serialize_regmode =
995 SER_REG_MODE_ON;
996 } else {
997 ah->config.serialize_regmode =
998 SER_REG_MODE_OFF;
999 }
1000 }
1001
1002 ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
1003 ah->config.serialize_regmode);
1004
1005 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
1006 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
1007 else
1008 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
1009
1010 if (!ath9k_hw_macversion_supported(ah)) {
1011 ath_print(common, ATH_DBG_FATAL,
1012 "Mac Chip Rev 0x%02x.%x is not supported by "
1013 "this driver\n", ah->hw_version.macVersion,
1014 ah->hw_version.macRev);
1015 return -EOPNOTSUPP;
1016 }
1017
1018 if (AR_SREV_9100(ah)) {
1019 ah->iq_caldata.calData = &iq_cal_multi_sample;
1020 ah->supp_cals = IQ_MISMATCH_CAL;
1021 ah->is_pciexpress = false;
1022 }
1023
1024 if (AR_SREV_9271(ah))
1025 ah->is_pciexpress = false;
1026
1027 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
1028 ath9k_hw_init_cal_settings(ah);
1029
1030 ah->ani_function = ATH9K_ANI_ALL;
1031 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1032 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
1033
1034 ath9k_hw_init_mode_regs(ah);
1035
1036 if (ah->is_pciexpress)
1037 ath9k_hw_configpcipowersave(ah, 0, 0);
1038 else
1039 ath9k_hw_disablepcie(ah);
1040
1041 /* Support for Japan ch.14 (2484) spread */
1042 if (AR_SREV_9287_11_OR_LATER(ah)) {
1043 INIT_INI_ARRAY(&ah->iniCckfirNormal,
1044 ar9287Common_normal_cck_fir_coeff_92871_1,
1045 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
1046 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
1047 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
1048 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
1049 }
1050
1051 r = ath9k_hw_post_init(ah);
1052 if (r)
1053 return r;
1054
1055 ath9k_hw_init_mode_gain_regs(ah);
1056 r = ath9k_hw_fill_cap_info(ah);
1057 if (r)
1058 return r;
1059
1060 ath9k_hw_init_eeprom_fix(ah);
1061
1062 r = ath9k_hw_init_macaddr(ah);
1063 if (r) {
1064 ath_print(common, ATH_DBG_FATAL,
1065 "Failed to initialize MAC address\n");
1066 return r;
1067 }
1068
1069 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
1070 ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
1071 else
1072 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
1073
1074 ath9k_init_nfcal_hist_buffer(ah);
1075
1076 common->state = ATH_HW_INITIALIZED;
1077
1078 return 0;
1079 }
1080
1081 int ath9k_hw_init(struct ath_hw *ah)
1082 {
1083 int ret;
1084 struct ath_common *common = ath9k_hw_common(ah);
1085
1086 /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
1087 switch (ah->hw_version.devid) {
1088 case AR5416_DEVID_PCI:
1089 case AR5416_DEVID_PCIE:
1090 case AR5416_AR9100_DEVID:
1091 case AR9160_DEVID_PCI:
1092 case AR9280_DEVID_PCI:
1093 case AR9280_DEVID_PCIE:
1094 case AR9285_DEVID_PCIE:
1095 case AR9287_DEVID_PCI:
1096 case AR9287_DEVID_PCIE:
1097 case AR2427_DEVID_PCIE:
1098 case AR9300_DEVID_PCIE:
1099 break;
1100 default:
1101 if (common->bus_ops->ath_bus_type == ATH_USB)
1102 break;
1103 ath_print(common, ATH_DBG_FATAL,
1104 "Hardware device ID 0x%04x not supported\n",
1105 ah->hw_version.devid);
1106 return -EOPNOTSUPP;
1107 }
1108
1109 ret = __ath9k_hw_init(ah);
1110 if (ret) {
1111 ath_print(common, ATH_DBG_FATAL,
1112 "Unable to initialize hardware; "
1113 "initialization status: %d\n", ret);
1114 return ret;
1115 }
1116
1117 return 0;
1118 }
1119 EXPORT_SYMBOL(ath9k_hw_init);
1120
1121 static void ath9k_hw_init_qos(struct ath_hw *ah)
1122 {
1123 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
1124 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
1125
1126 REG_WRITE(ah, AR_QOS_NO_ACK,
1127 SM(2, AR_QOS_NO_ACK_TWO_BIT) |
1128 SM(5, AR_QOS_NO_ACK_BIT_OFF) |
1129 SM(0, AR_QOS_NO_ACK_BYTE_OFF));
1130
1131 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
1132 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
1133 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
1134 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
1135 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
1136 }
1137
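/* Program the RTC PLL for the target channel and force the derived sleep clock */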
1138 static void ath9k_hw_init_pll(struct ath_hw *ah,
1139 struct ath9k_channel *chan)
1140 {
1141 u32 pll = ath9k_hw_compute_pll_control(ah, chan);
1142
1143 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1144
1145 /* Switch the core clock for AR9271 to 117 MHz */
1146 if (AR_SREV_9271(ah)) {
1147 udelay(500);
1148 REG_WRITE(ah, 0x50040, 0x304);
1149 }
1150
1151 udelay(RTC_PLL_SETTLE_DELAY);
1152
1153 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1154 }
1155
1156 static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1157 enum nl80211_iftype opmode)
1158 {
1159 u32 imr_reg = AR_IMR_TXERR |
1160 AR_IMR_TXURN |
1161 AR_IMR_RXERR |
1162 AR_IMR_RXORN |
1163 AR_IMR_BCNMISC;
1164
1165 if (ah->config.rx_intr_mitigation)
1166 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1167 else
1168 imr_reg |= AR_IMR_RXOK;
1169
1170 imr_reg |= AR_IMR_TXOK;
1171
1172 if (opmode == NL80211_IFTYPE_AP)
1173 imr_reg |= AR_IMR_MIB;
1174
1175 REG_WRITE(ah, AR_IMR, imr_reg);
1176 ah->imrs2_reg |= AR_IMR_S2_GTT;
1177 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
1178
1179 if (!AR_SREV_9100(ah)) {
1180 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
1181 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
1182 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
1183 }
1184 }
1185
1186 static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
1187 {
1188 u32 val = ath9k_hw_mac_to_clks(ah, us);
1189 val = min(val, (u32) 0xFFFF);
1190 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
1191 }
1192
1193 static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1194 {
1195 u32 val = ath9k_hw_mac_to_clks(ah, us);
1196 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
1197 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
1198 }
1199
1200 static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1201 {
1202 u32 val = ath9k_hw_mac_to_clks(ah, us);
1203 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
1204 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
1205 }
1206
1207 static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1208 {
1209 if (tu > 0xFFFF) {
1210 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
1211 "bad global tx timeout %u\n", tu);
1212 ah->globaltxtimeout = (u32) -1;
1213 return false;
1214 } else {
1215 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
1216 ah->globaltxtimeout = tu;
1217 return true;
1218 }
1219 }
1220
1221 void ath9k_hw_init_global_settings(struct ath_hw *ah)
1222 {
1223 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
1224 int acktimeout;
1225 int slottime;
1226 int sifstime;
1227
1228 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1229 ah->misc_mode);
1230
1231 if (ah->misc_mode != 0)
1232 REG_WRITE(ah, AR_PCU_MISC,
1233 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
1234
1235 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
1236 sifstime = 16;
1237 else
1238 sifstime = 10;
1239
1240 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1241 slottime = ah->slottime + 3 * ah->coverage_class;
1242 acktimeout = slottime + sifstime;
1243
1244 /*
1245 * Workaround for early ACK timeouts, add an offset to match the
1246 * initval's 64us ack timeout value.
1247 * This was initially only meant to work around an issue with delayed
1248 * BA frames in some implementations, but it has been found to fix ACK
1249 * timeout issues in other cases as well.
1250 */
1251 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
1252 acktimeout += 64 - sifstime - ah->slottime;
1253
1254 ath9k_hw_setslottime(ah, slottime);
1255 ath9k_hw_set_ack_timeout(ah, acktimeout);
1256 ath9k_hw_set_cts_timeout(ah, acktimeout);
1257 if (ah->globaltxtimeout != (u32) -1)
1258 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
1259 }
1260 EXPORT_SYMBOL(ath9k_hw_init_global_settings);
1261
1262 void ath9k_hw_deinit(struct ath_hw *ah)
1263 {
1264 struct ath_common *common = ath9k_hw_common(ah);
1265
1266 if (common->state < ATH_HW_INITIALIZED)
1267 goto free_hw;
1268
1269 if (!AR_SREV_9100(ah))
1270 ath9k_hw_ani_disable(ah);
1271
1272 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1273
1274 free_hw:
1275 ath9k_hw_rf_free_ext_banks(ah);
1276 }
1277 EXPORT_SYMBOL(ath9k_hw_deinit);
1278
1279 /*******/
1280 /* INI */
1281 /*******/
1282
1283 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
1284 {
1285 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
1286
1287 if (IS_CHAN_B(chan))
1288 ctl |= CTL_11B;
1289 else if (IS_CHAN_G(chan))
1290 ctl |= CTL_11G;
1291 else
1292 ctl |= CTL_11A;
1293
1294 return ctl;
1295 }
1296
1297 /****************************************/
1298 /* Reset and Channel Switching Routines */
1299 /****************************************/
1300
1301 static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1302 {
1303 u32 regval;
1304
1305 /*
1306 * set AHB_MODE not to do cacheline prefetches
1307 */
1308 regval = REG_READ(ah, AR_AHB_MODE);
1309 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
1310
1311 /*
1312 * let mac dma reads be in 128 byte chunks
1313 */
1314 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
1315 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
1316
1317 /*
1318 * Restore TX Trigger Level to its pre-reset value.
1319 * The initial value depends on whether aggregation is enabled, and is
1320 * adjusted whenever underruns are detected.
1321 */
1322 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
1323
1324 /*
1325 * let mac dma writes be in 128 byte chunks
1326 */
1327 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
1328 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
1329
1330 /*
1331 * Setup receive FIFO threshold to hold off TX activities
1332 */
1333 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
1334
1335 /*
1336 * reduce the number of usable entries in PCU TXBUF to avoid
1337 * wrap around issues.
1338 */
1339 if (AR_SREV_9285(ah)) {
1340 /* For AR9285 the number of FIFOs is reduced to half,
1341 * so set the usable tx buf size to half as well to
1342 * avoid data/delimiter underruns
1343 */
1344 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1345 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
1346 } else if (!AR_SREV_9271(ah)) {
1347 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1348 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
1349 }
1350 }
1351
1352 static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1353 {
1354 u32 val;
1355
1356 val = REG_READ(ah, AR_STA_ID1);
1357 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1358 switch (opmode) {
1359 case NL80211_IFTYPE_AP:
1360 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1361 | AR_STA_ID1_KSRCH_MODE);
1362 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1363 break;
1364 case NL80211_IFTYPE_ADHOC:
1365 case NL80211_IFTYPE_MESH_POINT:
1366 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1367 | AR_STA_ID1_KSRCH_MODE);
1368 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1369 break;
1370 case NL80211_IFTYPE_STATION:
1371 case NL80211_IFTYPE_MONITOR:
1372 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1373 break;
1374 }
1375 }
1376
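/*
 * Split the scaled delta-slope coefficient into the mantissa/exponent
 * pair programmed into the PHY: locate the highest set bit, round and
 * shift the mantissa, then bias the exponent.
 */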
1377 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
1378 u32 *coef_mantissa, u32 *coef_exponent)
1379 {
1380 u32 coef_exp, coef_man;
1381
1382 for (coef_exp = 31; coef_exp > 0; coef_exp--)
1383 if ((coef_scaled >> coef_exp) & 0x1)
1384 break;
1385
1386 coef_exp = 14 - (coef_exp - COEF_SCALE_S);
1387
1388 coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
1389
1390 *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
1391 *coef_exponent = coef_exp - 16;
1392 }
1393
1394 static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1395 {
1396 u32 rst_flags;
1397 u32 tmpReg;
1398
1399 if (AR_SREV_9100(ah)) {
1400 u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK);
1401 val &= ~AR_RTC_DERIVED_CLK_PERIOD;
1402 val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD);
1403 REG_WRITE(ah, AR_RTC_DERIVED_CLK, val);
1404 (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
1405 }
1406
1407 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1408 AR_RTC_FORCE_WAKE_ON_INT);
1409
1410 if (AR_SREV_9100(ah)) {
1411 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
1412 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1413 } else {
1414 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1415 if (tmpReg &
1416 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1417 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1418 u32 val;
1419 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1420
1421 val = AR_RC_HOSTIF;
1422 if (!AR_SREV_9300_20_OR_LATER(ah))
1423 val |= AR_RC_AHB;
1424 REG_WRITE(ah, AR_RC, val);
1425
1426 } else if (!AR_SREV_9300_20_OR_LATER(ah))
1427 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1428
1429 rst_flags = AR_RTC_RC_MAC_WARM;
1430 if (type == ATH9K_RESET_COLD)
1431 rst_flags |= AR_RTC_RC_MAC_COLD;
1432 }
1433
1434 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1435 udelay(50);
1436
1437 REG_WRITE(ah, AR_RTC_RC, 0);
1438 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
1439 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1440 "RTC stuck in MAC reset\n");
1441 return false;
1442 }
1443
1444 if (!AR_SREV_9100(ah))
1445 REG_WRITE(ah, AR_RC, 0);
1446
1447 if (AR_SREV_9100(ah))
1448 udelay(50);
1449
1450 return true;
1451 }
1452
1453 static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1454 {
1455 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1456 AR_RTC_FORCE_WAKE_ON_INT);
1457
1458 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1459 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1460
1461 REG_WRITE(ah, AR_RTC_RESET, 0);
1462
1463 if (!AR_SREV_9300_20_OR_LATER(ah))
1464 udelay(2);
1465
1466 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1467 REG_WRITE(ah, AR_RC, 0);
1468
1469 REG_WRITE(ah, AR_RTC_RESET, 1);
1470
1471 if (!ath9k_hw_wait(ah,
1472 AR_RTC_STATUS,
1473 AR_RTC_STATUS_M,
1474 AR_RTC_STATUS_ON,
1475 AH_WAIT_TIMEOUT)) {
1476 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1477 "RTC not waking up\n");
1478 return false;
1479 }
1480
1481 ath9k_hw_read_revisions(ah);
1482
1483 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1484 }
1485
1486 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1487 {
1488 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1489 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1490
1491 switch (type) {
1492 case ATH9K_RESET_POWER_ON:
1493 return ath9k_hw_set_reset_power_on(ah);
1494 case ATH9K_RESET_WARM:
1495 case ATH9K_RESET_COLD:
1496 return ath9k_hw_set_reset(ah, type);
1497 default:
1498 return false;
1499 }
1500 }
1501
1502 static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1503 struct ath9k_channel *chan)
1504 {
1505 if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
1506 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
1507 return false;
1508 } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
1509 return false;
1510
1511 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1512 return false;
1513
1514 ah->chip_fullsleep = false;
1515 ath9k_hw_init_pll(ah, chan);
1516 ath9k_hw_set_rfmode(ah, chan);
1517
1518 return true;
1519 }
1520
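/*
 * Fast channel change: bail out if frames are still queued, kill
 * baseband RX, program the new channel and TX power, then redo delta
 * slope and spur mitigation. Only used when a full reset can be skipped.
 */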
1521 static bool ath9k_hw_channel_change(struct ath_hw *ah,
1522 struct ath9k_channel *chan)
1523 {
1524 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1525 struct ath_common *common = ath9k_hw_common(ah);
1526 struct ieee80211_channel *channel = chan->chan;
1527 u32 qnum;
1528 int r;
1529
1530 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1531 if (ath9k_hw_numtxpending(ah, qnum)) {
1532 ath_print(common, ATH_DBG_QUEUE,
1533 "Transmit frames pending on "
1534 "queue %d\n", qnum);
1535 return false;
1536 }
1537 }
1538
1539 if (!ath9k_hw_rfbus_req(ah)) {
1540 ath_print(common, ATH_DBG_FATAL,
1541 "Could not kill baseband RX\n");
1542 return false;
1543 }
1544
1545 ath9k_hw_set_channel_regs(ah, chan);
1546
1547 r = ath9k_hw_rf_set_freq(ah, chan);
1548 if (r) {
1549 ath_print(common, ATH_DBG_FATAL,
1550 "Failed to set channel\n");
1551 return false;
1552 }
1553
1554 ah->eep_ops->set_txpower(ah, chan,
1555 ath9k_regd_get_ctl(regulatory, chan),
1556 channel->max_antenna_gain * 2,
1557 channel->max_power * 2,
1558 min((u32) MAX_RATE_POWER,
1559 (u32) regulatory->power_limit));
1560
1561 ath9k_hw_rfbus_done(ah);
1562
1563 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1564 ath9k_hw_set_delta_slope(ah, chan);
1565
1566 ath9k_hw_spur_mitigate_freq(ah, chan);
1567
1568 if (!chan->oneTimeCalsDone)
1569 chan->oneTimeCalsDone = true;
1570
1571 return true;
1572 }
1573
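/*
 * Full chip reset and reconfiguration for the given channel. Takes the
 * fast channel-change path when only the channel differs; otherwise it
 * resets the chip, reloads the INI tables, restores station/antenna/LED
 * state, reprograms the queues and interrupt masks and runs the initial
 * calibration.
 */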
1574 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1575 bool bChannelChange)
1576 {
1577 struct ath_common *common = ath9k_hw_common(ah);
1578 u32 saveLedState;
1579 struct ath9k_channel *curchan = ah->curchan;
1580 u32 saveDefAntenna;
1581 u32 macStaId1;
1582 u64 tsf = 0;
1583 int i, r;
1584
1585 ah->txchainmask = common->tx_chainmask;
1586 ah->rxchainmask = common->rx_chainmask;
1587
1588 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1589 return -EIO;
1590
1591 if (curchan && !ah->chip_fullsleep)
1592 ath9k_hw_getnf(ah, curchan);
1593
1594 if (bChannelChange &&
1595 (ah->chip_fullsleep != true) &&
1596 (ah->curchan != NULL) &&
1597 (chan->channel != ah->curchan->channel) &&
1598 ((chan->channelFlags & CHANNEL_ALL) ==
1599 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
1600 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
1601 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
1602
1603 if (ath9k_hw_channel_change(ah, chan)) {
1604 ath9k_hw_loadnf(ah, ah->curchan);
1605 ath9k_hw_start_nfcal(ah);
1606 return 0;
1607 }
1608 }
1609
1610 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
1611 if (saveDefAntenna == 0)
1612 saveDefAntenna = 1;
1613
1614 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
1615
1616 /* For chips on which RTC reset is done, save TSF before it gets cleared */
1617 if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
1618 tsf = ath9k_hw_gettsf64(ah);
1619
1620 saveLedState = REG_READ(ah, AR_CFG_LED) &
1621 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
1622 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
1623
1624 ath9k_hw_mark_phy_inactive(ah);
1625
1626 /* Only required on the first reset */
1627 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1628 REG_WRITE(ah,
1629 AR9271_RESET_POWER_DOWN_CONTROL,
1630 AR9271_RADIO_RF_RST);
1631 udelay(50);
1632 }
1633
1634 if (!ath9k_hw_chip_reset(ah, chan)) {
1635 ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n");
1636 return -EINVAL;
1637 }
1638
1639 /* Only required on the first reset */
1640 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1641 ah->htc_reset_init = false;
1642 REG_WRITE(ah,
1643 AR9271_RESET_POWER_DOWN_CONTROL,
1644 AR9271_GATE_MAC_CTL);
1645 udelay(50);
1646 }
1647
1648 /* Restore TSF */
1649 if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
1650 ath9k_hw_settsf64(ah, tsf);
1651
1652 if (AR_SREV_9280_10_OR_LATER(ah))
1653 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
1654
1655 r = ath9k_hw_process_ini(ah, chan);
1656 if (r)
1657 return r;
1658
1659 /* Setup MFP options for CCMP */
1660 if (AR_SREV_9280_20_OR_LATER(ah)) {
1661 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
1662 * frames when constructing CCMP AAD. */
1663 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
1664 0xc7ff);
1665 ah->sw_mgmt_crypto = false;
1666 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1667 /* Disable hardware crypto for management frames */
1668 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
1669 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
1670 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1671 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
1672 ah->sw_mgmt_crypto = true;
1673 } else
1674 ah->sw_mgmt_crypto = true;
1675
1676 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1677 ath9k_hw_set_delta_slope(ah, chan);
1678
1679 ath9k_hw_spur_mitigate_freq(ah, chan);
1680 ah->eep_ops->set_board_values(ah, chan);
1681
1682 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
1683 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
1684 | macStaId1
1685 | AR_STA_ID1_RTS_USE_DEF
1686 | (ah->config.
1687 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1688 | ah->sta_id1_defaults);
1689 ath9k_hw_set_operating_mode(ah, ah->opmode);
1690
1691 ath_hw_setbssidmask(common);
1692
1693 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1694
1695 ath9k_hw_write_associd(ah);
1696
1697 REG_WRITE(ah, AR_ISR, ~0);
1698
1699 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1700
1701 r = ath9k_hw_rf_set_freq(ah, chan);
1702 if (r)
1703 return r;
1704
1705 for (i = 0; i < AR_NUM_DCU; i++)
1706 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1707
1708 ah->intr_txqs = 0;
1709 for (i = 0; i < ah->caps.total_queues; i++)
1710 ath9k_hw_resettxqueue(ah, i);
1711
1712 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
1713 ath9k_hw_init_qos(ah);
1714
1715 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1716 ath9k_enable_rfkill(ah);
1717
1718 ath9k_hw_init_global_settings(ah);
1719
1720 if (AR_SREV_9287_12_OR_LATER(ah)) {
1721 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
1722 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
1723 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
1724 AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
1725 REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
1726 AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
1727
1728 REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
1729 REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
1730
1731 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
1732 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
1733 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
1734 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
1735 }
1736 if (AR_SREV_9287_12_OR_LATER(ah)) {
1737 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1738 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
1739 }
1740
1741 REG_WRITE(ah, AR_STA_ID1,
1742 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
1743
1744 ath9k_hw_set_dma(ah);
1745
1746 REG_WRITE(ah, AR_OBS, 8);
1747
1748 if (ah->config.rx_intr_mitigation) {
1749 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
1750 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
1751 }
1752
1753 ath9k_hw_init_bb(ah, chan);
1754
1755 if (!ath9k_hw_init_cal(ah, chan))
1756 return -EIO;
1757
1758 ath9k_hw_restore_chainmask(ah);
1759 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
1760
1761 /*
1762 * For big endian systems turn on swapping for descriptors
1763 */
1764 if (AR_SREV_9100(ah)) {
1765 u32 mask;
1766 mask = REG_READ(ah, AR_CFG);
1767 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1768 ath_print(common, ATH_DBG_RESET,
1769 "CFG Byte Swap Set 0x%x\n", mask);
1770 } else {
1771 mask =
1772 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1773 REG_WRITE(ah, AR_CFG, mask);
1774 ath_print(common, ATH_DBG_RESET,
1775 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
1776 }
1777 } else {
1778 /* Configure AR9271 target WLAN */
1779 if (AR_SREV_9271(ah))
1780 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1781 #ifdef __BIG_ENDIAN
1782 else
1783 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1784 #endif
1785 }
1786
1787 if (ah->btcoex_hw.enabled)
1788 ath9k_hw_btcoex_enable(ah);
1789
1790 return 0;
1791 }
1792 EXPORT_SYMBOL(ath9k_hw_reset);
1793
1794 /************************/
1795 /* Key Cache Management */
1796 /************************/
1797
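/*
 * Clear a key cache entry, including the companion TKIP MIC entry at
 * index + 64 when Michael MIC is enabled.
 */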
1798 bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
1799 {
1800 u32 keyType;
1801
1802 if (entry >= ah->caps.keycache_size) {
1803 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1804 "keychache entry %u out of range\n", entry);
1805 return false;
1806 }
1807
1808 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
1809
1810 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
1811 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
1812 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
1813 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
1814 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
1815 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
1816 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
1817 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
1818
1819 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1820 u16 micentry = entry + 64;
1821
1822 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
1823 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
1824 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
1825 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
1826
1827 }
1828
1829 return true;
1830 }
1831 EXPORT_SYMBOL(ath9k_hw_keyreset);
1832
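/*
 * Program the MAC address for a key cache entry; the address is stored
 * shifted right by one bit with the valid bit set in the high word.
 */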
1833 bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1834 {
1835 u32 macHi, macLo;
1836
1837 if (entry >= ah->caps.keycache_size) {
1838 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1839 "keychache entry %u out of range\n", entry);
1840 return false;
1841 }
1842
1843 if (mac != NULL) {
1844 macHi = (mac[5] << 8) | mac[4];
1845 macLo = (mac[3] << 24) |
1846 (mac[2] << 16) |
1847 (mac[1] << 8) |
1848 mac[0];
1849 macLo >>= 1;
1850 macLo |= (macHi & 1) << 31;
1851 macHi >>= 1;
1852 } else {
1853 macLo = macHi = 0;
1854 }
1855 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
1856 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
1857
1858 return true;
1859 }
1860 EXPORT_SYMBOL(ath9k_hw_keysetmac);
1861
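/*
 * Install a key into the key cache: map the cipher to a hardware key
 * type, write the key in the required 32-bit register pairs, and for
 * TKIP with MIC enabled also program the MIC entry at index + 64.
 */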
1862 bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
1863 const struct ath9k_keyval *k,
1864 const u8 *mac)
1865 {
1866 const struct ath9k_hw_capabilities *pCap = &ah->caps;
1867 struct ath_common *common = ath9k_hw_common(ah);
1868 u32 key0, key1, key2, key3, key4;
1869 u32 keyType;
1870
1871 if (entry >= pCap->keycache_size) {
1872 ath_print(common, ATH_DBG_FATAL,
1873 "keycache entry %u out of range\n", entry);
1874 return false;
1875 }
1876
1877 switch (k->kv_type) {
1878 case ATH9K_CIPHER_AES_OCB:
1879 keyType = AR_KEYTABLE_TYPE_AES;
1880 break;
1881 case ATH9K_CIPHER_AES_CCM:
1882 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
1883 ath_print(common, ATH_DBG_ANY,
1884 "AES-CCM not supported by mac rev 0x%x\n",
1885 ah->hw_version.macRev);
1886 return false;
1887 }
1888 keyType = AR_KEYTABLE_TYPE_CCM;
1889 break;
1890 case ATH9K_CIPHER_TKIP:
1891 keyType = AR_KEYTABLE_TYPE_TKIP;
1892 if (ATH9K_IS_MIC_ENABLED(ah)
1893 && entry + 64 >= pCap->keycache_size) {
1894 ath_print(common, ATH_DBG_ANY,
1895 "entry %u inappropriate for TKIP\n", entry);
1896 return false;
1897 }
1898 break;
1899 case ATH9K_CIPHER_WEP:
1900 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
1901 ath_print(common, ATH_DBG_ANY,
1902 "WEP key length %u too small\n", k->kv_len);
1903 return false;
1904 }
1905 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
1906 keyType = AR_KEYTABLE_TYPE_40;
1907 else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1908 keyType = AR_KEYTABLE_TYPE_104;
1909 else
1910 keyType = AR_KEYTABLE_TYPE_128;
1911 break;
1912 case ATH9K_CIPHER_CLR:
1913 keyType = AR_KEYTABLE_TYPE_CLR;
1914 break;
1915 default:
1916 ath_print(common, ATH_DBG_FATAL,
1917 "cipher %u not supported\n", k->kv_type);
1918 return false;
1919 }
1920
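	/*
	 * The 128-bit key material is split across the five key words as
	 * 32/16/32/16/32 bits.  For WEP-40/104 keys only the low byte of
	 * the last word is significant, hence the mask below.
	 */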
1921 key0 = get_unaligned_le32(k->kv_val + 0);
1922 key1 = get_unaligned_le16(k->kv_val + 4);
1923 key2 = get_unaligned_le32(k->kv_val + 6);
1924 key3 = get_unaligned_le16(k->kv_val + 10);
1925 key4 = get_unaligned_le32(k->kv_val + 12);
1926 if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1927 key4 &= 0xff;
1928
1929 /*
1930 * Note: Key cache registers access a special memory area that requires
1931 * two 32-bit writes to actually update the values in the internal
1932 * memory. Consequently, the exact order and pairs used here must be
1933 * maintained.
1934 */
1935
1936 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1937 u16 micentry = entry + 64;
1938
1939 /*
1940 * Write inverted key[47:0] first to avoid Michael MIC errors
1941 * on frames that could be sent or received at the same time.
1942 * The correct key will be written in the end once everything
1943 * else is ready.
1944 */
1945 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
1946 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
1947
1948 /* Write key[95:48] */
1949 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
1950 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
1951
1952 /* Write key[127:96] and key type */
1953 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
1954 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
1955
1956 /* Write MAC address for the entry */
1957 (void) ath9k_hw_keysetmac(ah, entry, mac);
1958
1959 if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) {
1960 /*
1961 * TKIP uses two key cache entries:
1962 * Michael MIC TX/RX keys in the same key cache entry
1963 * (idx = main index + 64):
1964 * key0 [31:0] = RX key [31:0]
1965 * key1 [15:0] = TX key [31:16]
1966 * key1 [31:16] = reserved
1967 * key2 [31:0] = RX key [63:32]
1968 * key3 [15:0] = TX key [15:0]
1969 * key3 [31:16] = reserved
1970 * key4 [31:0] = TX key [63:32]
1971 */
1972 u32 mic0, mic1, mic2, mic3, mic4;
1973
1974 mic0 = get_unaligned_le32(k->kv_mic + 0);
1975 mic2 = get_unaligned_le32(k->kv_mic + 4);
1976 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
1977 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
1978 mic4 = get_unaligned_le32(k->kv_txmic + 4);
1979
1980 /* Write RX[31:0] and TX[31:16] */
1981 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
1982 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
1983
1984 /* Write RX[63:32] and TX[15:0] */
1985 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
1986 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
1987
1988 /* Write TX[63:32] and keyType(reserved) */
1989 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
1990 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
1991 AR_KEYTABLE_TYPE_CLR);
1992
1993 } else {
1994 /*
1995 * TKIP uses four key cache entries (two for group
1996 * keys):
1997 * Michael MIC TX/RX keys are in different key cache
1998 * entries (idx = main index + 64 for TX and
1999 * main index + 32 + 96 for RX):
2000 * key0 [31:0] = TX/RX MIC key [31:0]
2001 * key1 [31:0] = reserved
2002 * key2 [31:0] = TX/RX MIC key [63:32]
2003 * key3 [31:0] = reserved
2004 * key4 [31:0] = reserved
2005 *
2006 * Upper layer code will call this function separately
2007 * for TX and RX keys when these registers offsets are
2008 * used.
2009 */
2010 u32 mic0, mic2;
2011
2012 mic0 = get_unaligned_le32(k->kv_mic + 0);
2013 mic2 = get_unaligned_le32(k->kv_mic + 4);
2014
2015 /* Write MIC key[31:0] */
2016 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
2017 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
2018
2019 /* Write MIC key[63:32] */
2020 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
2021 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
2022
2023 /* Write TX[63:32] and keyType(reserved) */
2024 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
2025 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
2026 AR_KEYTABLE_TYPE_CLR);
2027 }
2028
2029 /* MAC address registers are reserved for the MIC entry */
2030 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
2031 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
2032
2033 /*
2034 * Write the correct (un-inverted) key[47:0] last to enable
2035 * TKIP now that all other registers are set with correct
2036 * values.
2037 */
2038 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
2039 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
2040 } else {
2041 /* Write key[47:0] */
2042 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
2043 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
2044
2045 /* Write key[95:48] */
2046 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
2047 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
2048
2049 /* Write key[127:96] and key type */
2050 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
2051 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
2052
2053 /* Write MAC address for the entry */
2054 (void) ath9k_hw_keysetmac(ah, entry, mac);
2055 }
2056
2057 return true;
2058 }
2059 EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
2060
2061 bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2062 {
2063 if (entry < ah->caps.keycache_size) {
2064 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
2065 if (val & AR_KEYTABLE_VALID)
2066 return true;
2067 }
2068 return false;
2069 }
2070 EXPORT_SYMBOL(ath9k_hw_keyisvalid);
2071
2072 /******************************/
2073 /* Power Management (Chipset) */
2074 /******************************/
2075
2076 /*
2077 * Notify that power management is disabled in self-generated frames.
2078 * If requested, force the chip to sleep.
2079 */
2080 static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2081 {
2082 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2083 if (setChip) {
2084 /*
2085 * Clear the RTC force wake bit to allow the
2086 * mac to go to sleep.
2087 */
2088 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2089 AR_RTC_FORCE_WAKE_EN);
2090 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
2091 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2092
2093 /* Shutdown chip. Active low */
2094 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
2095 REG_CLR_BIT(ah, (AR_RTC_RESET),
2096 AR_RTC_RESET_EN);
2097 }
2098 }
2099
2100 /*
2101 * Notify that power management is enabled in self-generated
2102 * frames. If requested, set the chip's power mode to
2103 * auto/normal. Duration is in units of 128us (1/8 TU).
2104 */
2105 static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2106 {
2107 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2108 if (setChip) {
2109 struct ath9k_hw_capabilities *pCap = &ah->caps;
2110
2111 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2112 /* Set WakeOnInterrupt bit; clear ForceWake bit */
2113 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2114 AR_RTC_FORCE_WAKE_ON_INT);
2115 } else {
2116 /*
2117 * Clear the RTC force wake bit to allow the
2118 * mac to go to sleep.
2119 */
2120 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2121 AR_RTC_FORCE_WAKE_EN);
2122 }
2123 }
2124 }
2125
2126 static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2127 {
2128 u32 val;
2129 int i;
2130
2131 if (setChip) {
2132 if ((REG_READ(ah, AR_RTC_STATUS) &
2133 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
2134 if (ath9k_hw_set_reset_reg(ah,
2135 ATH9K_RESET_POWER_ON) != true) {
2136 return false;
2137 }
2138 if (!AR_SREV_9300_20_OR_LATER(ah))
2139 ath9k_hw_init_pll(ah, NULL);
2140 }
2141 if (AR_SREV_9100(ah))
2142 REG_SET_BIT(ah, AR_RTC_RESET,
2143 AR_RTC_RESET_EN);
2144
2145 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2146 AR_RTC_FORCE_WAKE_EN);
2147 udelay(50);
2148
2149 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2150 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2151 if (val == AR_RTC_STATUS_ON)
2152 break;
2153 udelay(50);
2154 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2155 AR_RTC_FORCE_WAKE_EN);
2156 }
2157 if (i == 0) {
2158 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2159 "Failed to wakeup in %uus\n",
2160 POWER_UP_TIME / 20);
2161 return false;
2162 }
2163 }
2164
2165 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2166
2167 return true;
2168 }
2169
2170 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2171 {
2172 struct ath_common *common = ath9k_hw_common(ah);
2173 int status = true, setChip = true;
2174 static const char *modes[] = {
2175 "AWAKE",
2176 "FULL-SLEEP",
2177 "NETWORK SLEEP",
2178 "UNDEFINED"
2179 };
2180
2181 if (ah->power_mode == mode)
2182 return status;
2183
2184 ath_print(common, ATH_DBG_RESET, "%s -> %s\n",
2185 modes[ah->power_mode], modes[mode]);
2186
2187 switch (mode) {
2188 case ATH9K_PM_AWAKE:
2189 status = ath9k_hw_set_power_awake(ah, setChip);
2190 break;
2191 case ATH9K_PM_FULL_SLEEP:
2192 ath9k_set_power_sleep(ah, setChip);
2193 ah->chip_fullsleep = true;
2194 break;
2195 case ATH9K_PM_NETWORK_SLEEP:
2196 ath9k_set_power_network_sleep(ah, setChip);
2197 break;
2198 default:
2199 ath_print(common, ATH_DBG_FATAL,
2200 "Unknown power mode %u\n", mode);
2201 return false;
2202 }
2203 ah->power_mode = mode;
2204
2205 return status;
2206 }
2207 EXPORT_SYMBOL(ath9k_hw_setpower);
2208
2209 /*
2210 * Helper for ASPM support.
2211 *
2212 * Disable PLL when in L0s as well as receiver clock when in L1.
2213 * This power saving option must be enabled through the SerDes.
2214 *
2215 * Programming the SerDes must go through the same 288 bit serial shift
2216 * register as the other analog registers. Hence the 9 writes.
2217 */
2218 static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
2219 int restore,
2220 int power_off)
2221 {
2222 u8 i;
2223 u32 val;
2224
2225 if (ah->is_pciexpress != true)
2226 return;
2227
2228 /* Do not touch SerDes registers */
2229 if (ah->config.pcie_powersave_enable == 2)
2230 return;
2231
2232 /* Nothing to do on restore for 11N */
2233 if (!restore) {
2234 if (AR_SREV_9280_20_OR_LATER(ah)) {
2235 /*
2236 * AR9280 2.0 and later chips use the SerDes values from
2237 * initvals.h, selected per chipset during
2238 * __ath9k_hw_init()
2239 */
2240 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
2241 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
2242 INI_RA(&ah->iniPcieSerdes, i, 1));
2243 }
2244 } else if (AR_SREV_9280(ah) &&
2245 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
2246 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
2247 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2248
2249 /* RX shut off when elecidle is asserted */
2250 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
2251 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
2252 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
2253
2254 /* Shut off CLKREQ active in L1 */
2255 if (ah->config.pcie_clock_req)
2256 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
2257 else
2258 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
2259
2260 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2261 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2262 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
2263
2264 /* Load the new settings */
2265 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2266
2267 } else {
2268 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
2269 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2270
2271 /* RX shut off when elecidle is asserted */
2272 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
2273 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
2274 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
2275
2276 /*
2277 * Ignore ah->ah_config.pcie_clock_req setting for
2278 * pre-AR9280 11n
2279 */
2280 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
2281
2282 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2283 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2284 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
2285
2286 /* Load the new settings */
2287 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2288 }
2289
2290 udelay(1000);
2291
2292 /* set bit 19 to allow forcing of pcie core into L1 state */
2293 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
2294
2295 /* Several PCIe workaround settings to ensure proper behaviour */
2296 if (ah->config.pcie_waen) {
2297 val = ah->config.pcie_waen;
2298 if (!power_off)
2299 val &= (~AR_WA_D3_L1_DISABLE);
2300 } else {
2301 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2302 AR_SREV_9287(ah)) {
2303 val = AR9285_WA_DEFAULT;
2304 if (!power_off)
2305 val &= (~AR_WA_D3_L1_DISABLE);
2306 } else if (AR_SREV_9280(ah)) {
2307 /*
2308 * On AR9280 chips bit 22 of 0x4004 needs to be
2309 * set, otherwise the card may disappear.
2310 */
2311 val = AR9280_WA_DEFAULT;
2312 if (!power_off)
2313 val &= (~AR_WA_D3_L1_DISABLE);
2314 } else
2315 val = AR_WA_DEFAULT;
2316 }
2317
2318 REG_WRITE(ah, AR_WA, val);
2319 }
2320
2321 if (power_off) {
2322 /*
2323 * Set PCIe workaround bits
2324 * bit 14 in WA register (disable L1) should only
2325 * be set when device enters D3 and be cleared
2326 * when device comes back to D0.
2327 */
2328 if (ah->config.pcie_waen) {
2329 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
2330 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2331 } else {
2332 if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2333 AR_SREV_9287(ah)) &&
2334 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
2335 (AR_SREV_9280(ah) &&
2336 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
2337 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2338 }
2339 }
2340 }
2341 }
2342
2343 /**********************/
2344 /* Interrupt Handling */
2345 /**********************/
2346
2347 bool ath9k_hw_intrpend(struct ath_hw *ah)
2348 {
2349 u32 host_isr;
2350
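	/*
	 * The comparisons against AR_INTR_SPURIOUS below presumably filter
	 * out the all-ones value that register reads return once the device
	 * has dropped off the bus, so a dead card is not reported as having
	 * a pending interrupt.
	 */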
2351 if (AR_SREV_9100(ah))
2352 return true;
2353
2354 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
2355 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
2356 return true;
2357
2358 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
2359 if ((host_isr & AR_INTR_SYNC_DEFAULT)
2360 && (host_isr != AR_INTR_SPURIOUS))
2361 return true;
2362
2363 return false;
2364 }
2365 EXPORT_SYMBOL(ath9k_hw_intrpend);
2366
2367 bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2368 {
2369 u32 isr = 0;
2370 u32 mask2 = 0;
2371 struct ath9k_hw_capabilities *pCap = &ah->caps;
2372 u32 sync_cause = 0;
2373 bool fatal_int = false;
2374 struct ath_common *common = ath9k_hw_common(ah);
2375
2376 if (!AR_SREV_9100(ah)) {
2377 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
2378 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
2379 == AR_RTC_STATUS_ON) {
2380 isr = REG_READ(ah, AR_ISR);
2381 }
2382 }
2383
2384 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
2385 AR_INTR_SYNC_DEFAULT;
2386
2387 *masked = 0;
2388
2389 if (!isr && !sync_cause)
2390 return false;
2391 } else {
2392 *masked = 0;
2393 isr = REG_READ(ah, AR_ISR);
2394 }
2395
2396 if (isr) {
2397 if (isr & AR_ISR_BCNMISC) {
2398 u32 isr2;
2399 isr2 = REG_READ(ah, AR_ISR_S2);
2400 if (isr2 & AR_ISR_S2_TIM)
2401 mask2 |= ATH9K_INT_TIM;
2402 if (isr2 & AR_ISR_S2_DTIM)
2403 mask2 |= ATH9K_INT_DTIM;
2404 if (isr2 & AR_ISR_S2_DTIMSYNC)
2405 mask2 |= ATH9K_INT_DTIMSYNC;
2406 if (isr2 & (AR_ISR_S2_CABEND))
2407 mask2 |= ATH9K_INT_CABEND;
2408 if (isr2 & AR_ISR_S2_GTT)
2409 mask2 |= ATH9K_INT_GTT;
2410 if (isr2 & AR_ISR_S2_CST)
2411 mask2 |= ATH9K_INT_CST;
2412 if (isr2 & AR_ISR_S2_TSFOOR)
2413 mask2 |= ATH9K_INT_TSFOOR;
2414 }
2415
2416 isr = REG_READ(ah, AR_ISR_RAC);
2417 if (isr == 0xffffffff) {
2418 *masked = 0;
2419 return false;
2420 }
2421
2422 *masked = isr & ATH9K_INT_COMMON;
2423
2424 if (ah->config.rx_intr_mitigation) {
2425 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2426 *masked |= ATH9K_INT_RX;
2427 }
2428
2429 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
2430 *masked |= ATH9K_INT_RX;
2431 if (isr &
2432 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
2433 AR_ISR_TXEOL)) {
2434 u32 s0_s, s1_s;
2435
2436 *masked |= ATH9K_INT_TX;
2437
2438 s0_s = REG_READ(ah, AR_ISR_S0_S);
2439 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
2440 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
2441
2442 s1_s = REG_READ(ah, AR_ISR_S1_S);
2443 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
2444 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
2445 }
2446
2447 if (isr & AR_ISR_RXORN) {
2448 ath_print(common, ATH_DBG_INTERRUPT,
2449 "receive FIFO overrun interrupt\n");
2450 }
2451
2452 if (!AR_SREV_9100(ah)) {
2453 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2454 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
2455 if (isr5 & AR_ISR_S5_TIM_TIMER)
2456 *masked |= ATH9K_INT_TIM_TIMER;
2457 }
2458 }
2459
2460 *masked |= mask2;
2461 }
2462
2463 if (AR_SREV_9100(ah))
2464 return true;
2465
2466 if (isr & AR_ISR_GENTMR) {
2467 u32 s5_s;
2468
2469 s5_s = REG_READ(ah, AR_ISR_S5_S);
2470 if (isr & AR_ISR_GENTMR) {
2471 ah->intr_gen_timer_trigger =
2472 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
2473
2474 ah->intr_gen_timer_thresh =
2475 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
2476
2477 if (ah->intr_gen_timer_trigger)
2478 *masked |= ATH9K_INT_GENTIMER;
2479
2480 }
2481 }
2482
2483 if (sync_cause) {
2484 fatal_int =
2485 (sync_cause &
2486 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
2487 ? true : false;
2488
2489 if (fatal_int) {
2490 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
2491 ath_print(common, ATH_DBG_ANY,
2492 "received PCI FATAL interrupt\n");
2493 }
2494 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
2495 ath_print(common, ATH_DBG_ANY,
2496 "received PCI PERR interrupt\n");
2497 }
2498 *masked |= ATH9K_INT_FATAL;
2499 }
2500 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
2501 ath_print(common, ATH_DBG_INTERRUPT,
2502 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
2503 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
2504 REG_WRITE(ah, AR_RC, 0);
2505 *masked |= ATH9K_INT_FATAL;
2506 }
2507 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
2508 ath_print(common, ATH_DBG_INTERRUPT,
2509 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
2510 }
2511
2512 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
2513 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
2514 }
2515
2516 return true;
2517 }
2518 EXPORT_SYMBOL(ath9k_hw_getisr);
2519
2520 enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2521 {
2522 enum ath9k_int omask = ah->imask;
2523 u32 mask, mask2;
2524 struct ath9k_hw_capabilities *pCap = &ah->caps;
2525 struct ath_common *common = ath9k_hw_common(ah);
2526
2527 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
2528
2529 if (omask & ATH9K_INT_GLOBAL) {
2530 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
2531 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
2532 (void) REG_READ(ah, AR_IER);
2533 if (!AR_SREV_9100(ah)) {
2534 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
2535 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
2536
2537 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
2538 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
2539 }
2540 }
2541
2542 mask = ints & ATH9K_INT_COMMON;
2543 mask2 = 0;
2544
2545 if (ints & ATH9K_INT_TX) {
2546 if (ah->txok_interrupt_mask)
2547 mask |= AR_IMR_TXOK;
2548 if (ah->txdesc_interrupt_mask)
2549 mask |= AR_IMR_TXDESC;
2550 if (ah->txerr_interrupt_mask)
2551 mask |= AR_IMR_TXERR;
2552 if (ah->txeol_interrupt_mask)
2553 mask |= AR_IMR_TXEOL;
2554 }
2555 if (ints & ATH9K_INT_RX) {
2556 mask |= AR_IMR_RXERR;
2557 if (ah->config.rx_intr_mitigation)
2558 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2559 else
2560 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
2561 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
2562 mask |= AR_IMR_GENTMR;
2563 }
2564
2565 if (ints & (ATH9K_INT_BMISC)) {
2566 mask |= AR_IMR_BCNMISC;
2567 if (ints & ATH9K_INT_TIM)
2568 mask2 |= AR_IMR_S2_TIM;
2569 if (ints & ATH9K_INT_DTIM)
2570 mask2 |= AR_IMR_S2_DTIM;
2571 if (ints & ATH9K_INT_DTIMSYNC)
2572 mask2 |= AR_IMR_S2_DTIMSYNC;
2573 if (ints & ATH9K_INT_CABEND)
2574 mask2 |= AR_IMR_S2_CABEND;
2575 if (ints & ATH9K_INT_TSFOOR)
2576 mask2 |= AR_IMR_S2_TSFOOR;
2577 }
2578
2579 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
2580 mask |= AR_IMR_BCNMISC;
2581 if (ints & ATH9K_INT_GTT)
2582 mask2 |= AR_IMR_S2_GTT;
2583 if (ints & ATH9K_INT_CST)
2584 mask2 |= AR_IMR_S2_CST;
2585 }
2586
2587 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
2588 REG_WRITE(ah, AR_IMR, mask);
2589 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
2590 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
2591 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
2592 ah->imrs2_reg |= mask2;
2593 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
2594
2595 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2596 if (ints & ATH9K_INT_TIM_TIMER)
2597 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2598 else
2599 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2600 }
2601
2602 if (ints & ATH9K_INT_GLOBAL) {
2603 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
2604 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
2605 if (!AR_SREV_9100(ah)) {
2606 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
2607 AR_INTR_MAC_IRQ);
2608 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
2609
2610
2611 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
2612 AR_INTR_SYNC_DEFAULT);
2613 REG_WRITE(ah, AR_INTR_SYNC_MASK,
2614 AR_INTR_SYNC_DEFAULT);
2615 }
2616 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
2617 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
2618 }
2619
2620 return omask;
2621 }
2622 EXPORT_SYMBOL(ath9k_hw_set_interrupts);
2623
2624 /*******************/
2625 /* Beacon Handling */
2626 /*******************/
2627
2628 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
2629 {
2630 int flags = 0;
2631
2632 ah->beacon_interval = beacon_period;
2633
2634 switch (ah->opmode) {
2635 case NL80211_IFTYPE_STATION:
2636 case NL80211_IFTYPE_MONITOR:
2637 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
2638 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
2639 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
2640 flags |= AR_TBTT_TIMER_EN;
2641 break;
2642 case NL80211_IFTYPE_ADHOC:
2643 case NL80211_IFTYPE_MESH_POINT:
2644 REG_SET_BIT(ah, AR_TXCFG,
2645 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
2646 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
2647 TU_TO_USEC(next_beacon +
2648 (ah->atim_window ? ah->
2649 atim_window : 1)));
2650 flags |= AR_NDP_TIMER_EN;
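		/* fall through: IBSS/mesh also program the TBTT/DBA/SWBA timers */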
2651 case NL80211_IFTYPE_AP:
2652 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
2653 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
2654 TU_TO_USEC(next_beacon -
2655 ah->config.
2656 dma_beacon_response_time));
2657 REG_WRITE(ah, AR_NEXT_SWBA,
2658 TU_TO_USEC(next_beacon -
2659 ah->config.
2660 sw_beacon_response_time));
2661 flags |=
2662 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
2663 break;
2664 default:
2665 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
2666 "%s: unsupported opmode: %d\n",
2667 __func__, ah->opmode);
2668 return;
2670 }
2671
2672 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
2673 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
2674 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
2675 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
2676
2677 beacon_period &= ~ATH9K_BEACON_ENA;
2678 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
2679 ath9k_hw_reset_tsf(ah);
2680 }
2681
2682 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
2683 }
2684 EXPORT_SYMBOL(ath9k_hw_beaconinit);
2685
2686 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
2687 const struct ath9k_beacon_state *bs)
2688 {
2689 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
2690 struct ath9k_hw_capabilities *pCap = &ah->caps;
2691 struct ath_common *common = ath9k_hw_common(ah);
2692
2693 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
2694
2695 REG_WRITE(ah, AR_BEACON_PERIOD,
2696 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
2697 REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
2698 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
2699
2700 REG_RMW_FIELD(ah, AR_RSSI_THR,
2701 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
2702
2703 beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
2704
2705 if (bs->bs_sleepduration > beaconintval)
2706 beaconintval = bs->bs_sleepduration;
2707
2708 dtimperiod = bs->bs_dtimperiod;
2709 if (bs->bs_sleepduration > dtimperiod)
2710 dtimperiod = bs->bs_sleepduration;
2711
2712 if (beaconintval == dtimperiod)
2713 nextTbtt = bs->bs_nextdtim;
2714 else
2715 nextTbtt = bs->bs_nexttbtt;
2716
2717 ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
2718 ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
2719 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
2720 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
2721
2722 REG_WRITE(ah, AR_NEXT_DTIM,
2723 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
2724 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
2725
2726 REG_WRITE(ah, AR_SLEEP1,
2727 SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
2728 | AR_SLEEP1_ASSUME_DTIM);
2729
2730 if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
2731 beacontimeout = (BEACON_TIMEOUT_VAL << 3);
2732 else
2733 beacontimeout = MIN_BEACON_TIMEOUT_VAL;
2734
2735 REG_WRITE(ah, AR_SLEEP2,
2736 SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
2737
2738 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
2739 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
2740
2741 REG_SET_BIT(ah, AR_TIMER_MODE,
2742 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
2743 AR_DTIM_TIMER_EN);
2744
2745 /* TSF Out of Range Threshold */
2746 REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
2747 }
2748 EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
2749
2750 /*******************/
2751 /* HW Capabilities */
2752 /*******************/
2753
2754 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2755 {
2756 struct ath9k_hw_capabilities *pCap = &ah->caps;
2757 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2758 struct ath_common *common = ath9k_hw_common(ah);
2759 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
2760
2761 u16 capField = 0, eeval;
2762
2763 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
2764 regulatory->current_rd = eeval;
2765
2766 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
2767 if (AR_SREV_9285_10_OR_LATER(ah))
2768 eeval |= AR9285_RDEXT_DEFAULT;
2769 regulatory->current_rd_ext = eeval;
2770
2771 capField = ah->eep_ops->get_eeprom(ah, EEP_OP_CAP);
2772
2773 if (ah->opmode != NL80211_IFTYPE_AP &&
2774 ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
2775 if (regulatory->current_rd == 0x64 ||
2776 regulatory->current_rd == 0x65)
2777 regulatory->current_rd += 5;
2778 else if (regulatory->current_rd == 0x41)
2779 regulatory->current_rd = 0x43;
2780 ath_print(common, ATH_DBG_REGULATORY,
2781 "regdomain mapped to 0x%x\n", regulatory->current_rd);
2782 }
2783
2784 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
2785 if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
2786 ath_print(common, ATH_DBG_FATAL,
2787 "no band has been marked as supported in EEPROM.\n");
2788 return -EINVAL;
2789 }
2790
2791 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
2792
2793 if (eeval & AR5416_OPFLAGS_11A) {
2794 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
2795 if (ah->config.ht_enable) {
2796 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2797 set_bit(ATH9K_MODE_11NA_HT20,
2798 pCap->wireless_modes);
2799 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2800 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2801 pCap->wireless_modes);
2802 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2803 pCap->wireless_modes);
2804 }
2805 }
2806 }
2807
2808 if (eeval & AR5416_OPFLAGS_11G) {
2809 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2810 if (ah->config.ht_enable) {
2811 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2812 set_bit(ATH9K_MODE_11NG_HT20,
2813 pCap->wireless_modes);
2814 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2815 set_bit(ATH9K_MODE_11NG_HT40PLUS,
2816 pCap->wireless_modes);
2817 set_bit(ATH9K_MODE_11NG_HT40MINUS,
2818 pCap->wireless_modes);
2819 }
2820 }
2821 }
2822
2823 pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
2824 /*
2825 * For AR9271 we will temporarily use the rx chainmask as read from
2826 * the EEPROM.
2827 */
2828 if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
2829 !(eeval & AR5416_OPFLAGS_11A) &&
2830 !(AR_SREV_9271(ah)))
2831 /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
2832 pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
2833 else
2834 /* Use rx_chainmask from EEPROM. */
2835 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
2836
2837 if (!(AR_SREV_9280(ah) && (ah->hw_version.macRev == 0)))
2838 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
2839
2840 pCap->low_2ghz_chan = 2312;
2841 pCap->high_2ghz_chan = 2732;
2842
2843 pCap->low_5ghz_chan = 4920;
2844 pCap->high_5ghz_chan = 6100;
2845
2846 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
2847 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
2848 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
2849
2850 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
2851 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
2852 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
2853
2854 if (ah->config.ht_enable)
2855 pCap->hw_caps |= ATH9K_HW_CAP_HT;
2856 else
2857 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2858
2859 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
2860 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
2861 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
2862 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
2863
2864 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
2865 pCap->total_queues =
2866 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
2867 else
2868 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
2869
2870 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
2871 pCap->keycache_size =
2872 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
2873 else
2874 pCap->keycache_size = AR_KEYTABLE_SIZE;
2875
2876 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
2877
2878 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
2879 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
2880 else
2881 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
2882
2883 if (AR_SREV_9271(ah))
2884 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2885 else if (AR_SREV_9285_10_OR_LATER(ah))
2886 pCap->num_gpio_pins = AR9285_NUM_GPIO;
2887 else if (AR_SREV_9280_10_OR_LATER(ah))
2888 pCap->num_gpio_pins = AR928X_NUM_GPIO;
2889 else
2890 pCap->num_gpio_pins = AR_NUM_GPIO;
2891
2892 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
2893 pCap->hw_caps |= ATH9K_HW_CAP_CST;
2894 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
2895 } else {
2896 pCap->rts_aggr_limit = (8 * 1024);
2897 }
2898
2899 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
2900
2901 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2902 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
2903 if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
2904 ah->rfkill_gpio =
2905 MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
2906 ah->rfkill_polarity =
2907 MS(ah->rfsilent, EEP_RFSILENT_POLARITY);
2908
2909 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
2910 }
2911 #endif
2912 if (AR_SREV_9271(ah))
2913 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
2914 else
2915 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
2916
2917 if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
2918 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
2919 else
2920 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
2921
2922 if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) {
2923 pCap->reg_cap =
2924 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
2925 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
2926 AR_EEPROM_EEREGCAP_EN_KK_U2 |
2927 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
2928 } else {
2929 pCap->reg_cap =
2930 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
2931 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
2932 }
2933
2934 /* Advertise midband for AR5416 with FCC midband set in eeprom */
2935 if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
2936 AR_SREV_5416(ah))
2937 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
2938
2939 pCap->num_antcfg_5ghz =
2940 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
2941 pCap->num_antcfg_2ghz =
2942 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
2943
2944 if (AR_SREV_9280_10_OR_LATER(ah) &&
2945 ath9k_hw_btcoex_supported(ah)) {
2946 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
2947 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
2948
2949 if (AR_SREV_9285(ah)) {
2950 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
2951 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO;
2952 } else {
2953 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
2954 }
2955 } else {
2956 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
2957 }
2958
2959 if (AR_SREV_9300_20_OR_LATER(ah)) {
2960 pCap->hw_caps |= ATH9K_HW_CAP_EDMA;
2961 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
2962 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
2963 pCap->rx_status_len = sizeof(struct ar9003_rxs);
2964 }
2965
2966 return 0;
2967 }
2968
2969 bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
2970 u32 capability, u32 *result)
2971 {
2972 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2973 switch (type) {
2974 case ATH9K_CAP_CIPHER:
2975 switch (capability) {
2976 case ATH9K_CIPHER_AES_CCM:
2977 case ATH9K_CIPHER_AES_OCB:
2978 case ATH9K_CIPHER_TKIP:
2979 case ATH9K_CIPHER_WEP:
2980 case ATH9K_CIPHER_MIC:
2981 case ATH9K_CIPHER_CLR:
2982 return true;
2983 default:
2984 return false;
2985 }
2986 case ATH9K_CAP_TKIP_MIC:
2987 switch (capability) {
2988 case 0:
2989 return true;
2990 case 1:
2991 return (ah->sta_id1_defaults &
2992 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
2993 false;
2994 }
2995 case ATH9K_CAP_TKIP_SPLIT:
2996 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
2997 false : true;
2998 case ATH9K_CAP_MCAST_KEYSRCH:
2999 switch (capability) {
3000 case 0:
3001 return true;
3002 case 1:
3003 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
3004 return false;
3005 } else {
3006 return (ah->sta_id1_defaults &
3007 AR_STA_ID1_MCAST_KSRCH) ? true :
3008 false;
3009 }
3010 }
3011 return false;
3012 case ATH9K_CAP_TXPOW:
3013 switch (capability) {
3014 case 0:
3015 return 0;
3016 case 1:
3017 *result = regulatory->power_limit;
3018 return 0;
3019 case 2:
3020 *result = regulatory->max_power_level;
3021 return 0;
3022 case 3:
3023 *result = regulatory->tp_scale;
3024 return 0;
3025 }
3026 return false;
3027 case ATH9K_CAP_DS:
3028 return (AR_SREV_9280_20_OR_LATER(ah) &&
3029 (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1))
3030 ? false : true;
3031 default:
3032 return false;
3033 }
3034 }
3035 EXPORT_SYMBOL(ath9k_hw_getcapability);
3036
3037 bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3038 u32 capability, u32 setting, int *status)
3039 {
3040 switch (type) {
3041 case ATH9K_CAP_TKIP_MIC:
3042 if (setting)
3043 ah->sta_id1_defaults |=
3044 AR_STA_ID1_CRPT_MIC_ENABLE;
3045 else
3046 ah->sta_id1_defaults &=
3047 ~AR_STA_ID1_CRPT_MIC_ENABLE;
3048 return true;
3049 case ATH9K_CAP_MCAST_KEYSRCH:
3050 if (setting)
3051 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
3052 else
3053 ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
3054 return true;
3055 default:
3056 return false;
3057 }
3058 }
3059 EXPORT_SYMBOL(ath9k_hw_setcapability);
3060
3061 /****************************/
3062 /* GPIO / RFKILL / Antennae */
3063 /****************************/
3064
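/*
 * Each GPIO output mux selector is a 5-bit field, six selectors per
 * 32-bit mux register.  On chips older than AR9280 2.0 the first mux
 * register apparently reads back with its upper selector fields shifted
 * down by one bit, so instead of a plain read-modify-write the value is
 * re-aligned by hand before the new selector is merged in.
 */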
3065 static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
3066 u32 gpio, u32 type)
3067 {
3068 int addr;
3069 u32 gpio_shift, tmp;
3070
3071 if (gpio > 11)
3072 addr = AR_GPIO_OUTPUT_MUX3;
3073 else if (gpio > 5)
3074 addr = AR_GPIO_OUTPUT_MUX2;
3075 else
3076 addr = AR_GPIO_OUTPUT_MUX1;
3077
3078 gpio_shift = (gpio % 6) * 5;
3079
3080 if (AR_SREV_9280_20_OR_LATER(ah)
3081 || (addr != AR_GPIO_OUTPUT_MUX1)) {
3082 REG_RMW(ah, addr, (type << gpio_shift),
3083 (0x1f << gpio_shift));
3084 } else {
3085 tmp = REG_READ(ah, addr);
3086 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
3087 tmp &= ~(0x1f << gpio_shift);
3088 tmp |= (type << gpio_shift);
3089 REG_WRITE(ah, addr, tmp);
3090 }
3091 }
3092
3093 void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3094 {
3095 u32 gpio_shift;
3096
3097 BUG_ON(gpio >= ah->caps.num_gpio_pins);
3098
3099 gpio_shift = gpio << 1;
3100
3101 REG_RMW(ah,
3102 AR_GPIO_OE_OUT,
3103 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
3104 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3105 }
3106 EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
3107
3108 u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3109 {
3110 #define MS_REG_READ(x, y) \
3111 (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
3112
3113 if (gpio >= ah->caps.num_gpio_pins)
3114 return 0xffffffff;
3115
3116 if (AR_SREV_9300_20_OR_LATER(ah))
3117 return MS_REG_READ(AR9300, gpio) != 0;
3118 else if (AR_SREV_9271(ah))
3119 return MS_REG_READ(AR9271, gpio) != 0;
3120 else if (AR_SREV_9287_10_OR_LATER(ah))
3121 return MS_REG_READ(AR9287, gpio) != 0;
3122 else if (AR_SREV_9285_10_OR_LATER(ah))
3123 return MS_REG_READ(AR9285, gpio) != 0;
3124 else if (AR_SREV_9280_10_OR_LATER(ah))
3125 return MS_REG_READ(AR928X, gpio) != 0;
3126 else
3127 return MS_REG_READ(AR, gpio) != 0;
3128 }
3129 EXPORT_SYMBOL(ath9k_hw_gpio_get);
3130
3131 void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3132 u32 ah_signal_type)
3133 {
3134 u32 gpio_shift;
3135
3136 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
3137
3138 gpio_shift = 2 * gpio;
3139
3140 REG_RMW(ah,
3141 AR_GPIO_OE_OUT,
3142 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
3143 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3144 }
3145 EXPORT_SYMBOL(ath9k_hw_cfg_output);
3146
3147 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3148 {
3149 if (AR_SREV_9271(ah))
3150 val = ~val;
3151
3152 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3153 AR_GPIO_BIT(gpio));
3154 }
3155 EXPORT_SYMBOL(ath9k_hw_set_gpio);
3156
3157 u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
3158 {
3159 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
3160 }
3161 EXPORT_SYMBOL(ath9k_hw_getdefantenna);
3162
3163 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
3164 {
3165 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
3166 }
3167 EXPORT_SYMBOL(ath9k_hw_setantenna);
3168
3169 /*********************/
3170 /* General Operation */
3171 /*********************/
3172
3173 u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
3174 {
3175 u32 bits = REG_READ(ah, AR_RX_FILTER);
3176 u32 phybits = REG_READ(ah, AR_PHY_ERR);
3177
3178 if (phybits & AR_PHY_ERR_RADAR)
3179 bits |= ATH9K_RX_FILTER_PHYRADAR;
3180 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
3181 bits |= ATH9K_RX_FILTER_PHYERR;
3182
3183 return bits;
3184 }
3185 EXPORT_SYMBOL(ath9k_hw_getrxfilter);
3186
3187 void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3188 {
3189 u32 phybits;
3190
3191 REG_WRITE(ah, AR_RX_FILTER, bits);
3192
3193 phybits = 0;
3194 if (bits & ATH9K_RX_FILTER_PHYRADAR)
3195 phybits |= AR_PHY_ERR_RADAR;
3196 if (bits & ATH9K_RX_FILTER_PHYERR)
3197 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
3198 REG_WRITE(ah, AR_PHY_ERR, phybits);
3199
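	/*
	 * PHY error reports are presumably delivered as zero-length frames,
	 * so zero-length-frame DMA is enabled whenever any PHY error bits
	 * are being passed up and disabled otherwise.
	 */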
3200 if (phybits)
3201 REG_WRITE(ah, AR_RXCFG,
3202 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
3203 else
3204 REG_WRITE(ah, AR_RXCFG,
3205 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
3206 }
3207 EXPORT_SYMBOL(ath9k_hw_setrxfilter);
3208
3209 bool ath9k_hw_phy_disable(struct ath_hw *ah)
3210 {
3211 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
3212 return false;
3213
3214 ath9k_hw_init_pll(ah, NULL);
3215 return true;
3216 }
3217 EXPORT_SYMBOL(ath9k_hw_phy_disable);
3218
3219 bool ath9k_hw_disable(struct ath_hw *ah)
3220 {
3221 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
3222 return false;
3223
3224 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
3225 return false;
3226
3227 ath9k_hw_init_pll(ah, NULL);
3228 return true;
3229 }
3230 EXPORT_SYMBOL(ath9k_hw_disable);
3231
3232 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
3233 {
3234 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
3235 struct ath9k_channel *chan = ah->curchan;
3236 struct ieee80211_channel *channel = chan->chan;
3237
3238 regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER);
3239
3240 ah->eep_ops->set_txpower(ah, chan,
3241 ath9k_regd_get_ctl(regulatory, chan),
3242 channel->max_antenna_gain * 2,
3243 channel->max_power * 2,
3244 min((u32) MAX_RATE_POWER,
3245 (u32) regulatory->power_limit));
3246 }
3247 EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
3248
3249 void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
3250 {
3251 memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
3252 }
3253 EXPORT_SYMBOL(ath9k_hw_setmac);
3254
3255 void ath9k_hw_setopmode(struct ath_hw *ah)
3256 {
3257 ath9k_hw_set_operating_mode(ah, ah->opmode);
3258 }
3259 EXPORT_SYMBOL(ath9k_hw_setopmode);
3260
3261 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
3262 {
3263 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
3264 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
3265 }
3266 EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
3267
3268 void ath9k_hw_write_associd(struct ath_hw *ah)
3269 {
3270 struct ath_common *common = ath9k_hw_common(ah);
3271
3272 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
3273 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
3274 ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
3275 }
3276 EXPORT_SYMBOL(ath9k_hw_write_associd);
3277
3278 u64 ath9k_hw_gettsf64(struct ath_hw *ah)
3279 {
3280 u64 tsf;
3281
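	/*
	 * The two halves are read without a retry; if the low word carries
	 * into the high word between the two reads, the combined value is
	 * off by 2^32.  A stricter reader would re-read AR_TSF_U32 and
	 * retry when it changes.
	 */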
3282 tsf = REG_READ(ah, AR_TSF_U32);
3283 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
3284
3285 return tsf;
3286 }
3287 EXPORT_SYMBOL(ath9k_hw_gettsf64);
3288
3289 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
3290 {
3291 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
3292 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
3293 }
3294 EXPORT_SYMBOL(ath9k_hw_settsf64);
3295
3296 void ath9k_hw_reset_tsf(struct ath_hw *ah)
3297 {
3298 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
3299 AH_TSF_WRITE_TIMEOUT))
3300 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3301 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
3302
3303 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
3304 }
3305 EXPORT_SYMBOL(ath9k_hw_reset_tsf);
3306
3307 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
3308 {
3309 if (setting)
3310 ah->misc_mode |= AR_PCU_TX_ADD_TSF;
3311 else
3312 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
3313 }
3314 EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
3315
3316 /*
3317 * Extend 15-bit time stamp from rx descriptor to
3318 * a full 64-bit TSF using the current h/w TSF.
3319 */
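/*
 * For example, a descriptor stamp of 0x7ff0 combined with a TSF whose low
 * 15 bits currently read 0x0005 means the stamp was taken just before the
 * last 15-bit wrap, so the TSF is first backed up by 0x8000 and the stamp
 * is then merged into its low bits.
 */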
3320 u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
3321 {
3322 u64 tsf;
3323
3324 tsf = ath9k_hw_gettsf64(ah);
3325 if ((tsf & 0x7fff) < rstamp)
3326 tsf -= 0x8000;
3327 return (tsf & ~0x7fff) | rstamp;
3328 }
3329 EXPORT_SYMBOL(ath9k_hw_extend_tsf);
3330
3331 void ath9k_hw_set11nmac2040(struct ath_hw *ah)
3332 {
3333 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
3334 u32 macmode;
3335
3336 if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
3337 macmode = AR_2040_JOINED_RX_CLEAR;
3338 else
3339 macmode = 0;
3340
3341 REG_WRITE(ah, AR_2040_MODE, macmode);
3342 }
3343
3344 /* HW Generic timers configuration */
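/*
 * Entries below AR_FIRST_NDP_TIMER correspond to the fixed-function
 * beacon timers and act only as placeholders here; the timers usable as
 * generic timers are the NDP timer (index 7) and the eight NDP2 timers
 * that follow it, each enabled by its own bit in the associated mode
 * register.
 */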
3345
3346 static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
3347 {
3348 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3349 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3350 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3351 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3352 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3353 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3354 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3355 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
3356 {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001},
3357 {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4,
3358 AR_NDP2_TIMER_MODE, 0x0002},
3359 {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4,
3360 AR_NDP2_TIMER_MODE, 0x0004},
3361 {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4,
3362 AR_NDP2_TIMER_MODE, 0x0008},
3363 {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4,
3364 AR_NDP2_TIMER_MODE, 0x0010},
3365 {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4,
3366 AR_NDP2_TIMER_MODE, 0x0020},
3367 {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4,
3368 AR_NDP2_TIMER_MODE, 0x0040},
3369 {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4,
3370 AR_NDP2_TIMER_MODE, 0x0080}
3371 };
3372
3373 /* HW generic timer primitives */
3374
3375 /* compute and clear index of rightmost 1 */
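/*
 * b &= (0 - b) isolates the lowest set bit; multiplying that single bit
 * by the 32-bit de Bruijn constant and shifting right by 27 produces a
 * unique 5-bit value, which the gen_timer_index[] table (set up elsewhere
 * in the driver) maps back to the original bit position.
 */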
3376 static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
3377 {
3378 u32 b;
3379
3380 b = *mask;
3381 b &= (0-b);
3382 *mask &= ~b;
3383 b *= debruijn32;
3384 b >>= 27;
3385
3386 return timer_table->gen_timer_index[b];
3387 }
3388
3389 u32 ath9k_hw_gettsf32(struct ath_hw *ah)
3390 {
3391 return REG_READ(ah, AR_TSF_L32);
3392 }
3393 EXPORT_SYMBOL(ath9k_hw_gettsf32);
3394
3395 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
3396 void (*trigger)(void *),
3397 void (*overflow)(void *),
3398 void *arg,
3399 u8 timer_index)
3400 {
3401 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
3402 struct ath_gen_timer *timer;
3403
3404 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
3405
3406 if (timer == NULL) {
3407 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
3408 "Failed to allocate memory"
3409 "for hw timer[%d]\n", timer_index);
3410 return NULL;
3411 }
3412
3413 /* allocate a hardware generic timer slot */
3414 timer_table->timers[timer_index] = timer;
3415 timer->index = timer_index;
3416 timer->trigger = trigger;
3417 timer->overflow = overflow;
3418 timer->arg = arg;
3419
3420 return timer;
3421 }
3422 EXPORT_SYMBOL(ath_gen_timer_alloc);
3423
3424 void ath9k_hw_gen_timer_start(struct ath_hw *ah,
3425 struct ath_gen_timer *timer,
3426 u32 timer_next,
3427 u32 timer_period)
3428 {
3429 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
3430 u32 tsf;
3431
3432 BUG_ON(!timer_period);
3433
3434 set_bit(timer->index, &timer_table->timer_mask.timer_bits);
3435
3436 tsf = ath9k_hw_gettsf32(ah);
3437
3438 ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
3439 "curent tsf %x period %x"
3440 "timer_next %x\n", tsf, timer_period, timer_next);
3441
3442 /*
3443 * Pull timer_next forward if the current TSF already passed it
3444 * because of software latency
3445 */
3446 if (timer_next < tsf)
3447 timer_next = tsf + timer_period;
3448
3449 /*
3450 * Program generic timer registers
3451 */
3452 REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr,
3453 timer_next);
3454 REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr,
3455 timer_period);
3456 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3457 gen_tmr_configuration[timer->index].mode_mask);
3458
3459 /* Enable both trigger and thresh interrupt masks */
3460 REG_SET_BIT(ah, AR_IMR_S5,
3461 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
3462 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
3463 }
3464 EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
3465
3466 void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
3467 {
3468 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
3469
3470 if ((timer->index < AR_FIRST_NDP_TIMER) ||
3471 (timer->index >= ATH_MAX_GEN_TIMER)) {
3472 return;
3473 }
3474
3475 /* Clear generic timer enable bits. */
3476 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3477 gen_tmr_configuration[timer->index].mode_mask);
3478
3479 /* Disable both trigger and thresh interrupt masks */
3480 REG_CLR_BIT(ah, AR_IMR_S5,
3481 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
3482 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
3483
3484 clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
3485 }
3486 EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
3487
3488 void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
3489 {
3490 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
3491
3492 /* free the hardware generic timer slot */
3493 timer_table->timers[timer->index] = NULL;
3494 kfree(timer);
3495 }
3496 EXPORT_SYMBOL(ath_gen_timer_free);
3497
3498 /*
3499 * Generic Timer Interrupts handling
3500 */
3501 void ath_gen_timer_isr(struct ath_hw *ah)
3502 {
3503 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
3504 struct ath_gen_timer *timer;
3505 struct ath_common *common = ath9k_hw_common(ah);
3506 u32 trigger_mask, thresh_mask, index;
3507
3508 /* get hardware generic timer interrupt status */
3509 trigger_mask = ah->intr_gen_timer_trigger;
3510 thresh_mask = ah->intr_gen_timer_thresh;
3511 trigger_mask &= timer_table->timer_mask.val;
3512 thresh_mask &= timer_table->timer_mask.val;
3513
3514 trigger_mask &= ~thresh_mask;
3515
3516 while (thresh_mask) {
3517 index = rightmost_index(timer_table, &thresh_mask);
3518 timer = timer_table->timers[index];
3519 BUG_ON(!timer);
3520 ath_print(common, ATH_DBG_HWTIMER,
3521 "TSF overflow for Gen timer %d\n", index);
3522 timer->overflow(timer->arg);
3523 }
3524
3525 while (trigger_mask) {
3526 index = rightmost_index(timer_table, &trigger_mask);
3527 timer = timer_table->timers[index];
3528 BUG_ON(!timer);
3529 ath_print(common, ATH_DBG_HWTIMER,
3530 "Gen timer[%d] trigger\n", index);
3531 timer->trigger(timer->arg);
3532 }
3533 }
3534 EXPORT_SYMBOL(ath_gen_timer_isr);
3535
3536 /********/
3537 /* HTC */
3538 /********/
3539
3540 void ath9k_hw_htc_resetinit(struct ath_hw *ah)
3541 {
3542 ah->htc_reset_init = true;
3543 }
3544 EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
3545
3546 static struct {
3547 u32 version;
3548 const char * name;
3549 } ath_mac_bb_names[] = {
3550 /* Devices with external radios */
3551 { AR_SREV_VERSION_5416_PCI, "5416" },
3552 { AR_SREV_VERSION_5416_PCIE, "5418" },
3553 { AR_SREV_VERSION_9100, "9100" },
3554 { AR_SREV_VERSION_9160, "9160" },
3555 /* Single-chip solutions */
3556 { AR_SREV_VERSION_9280, "9280" },
3557 { AR_SREV_VERSION_9285, "9285" },
3558 { AR_SREV_VERSION_9287, "9287" },
3559 { AR_SREV_VERSION_9271, "9271" },
3560 };
3561
3562 /* For devices with external radios */
3563 static struct {
3564 u16 version;
3565 const char * name;
3566 } ath_rf_names[] = {
3567 { 0, "5133" },
3568 { AR_RAD5133_SREV_MAJOR, "5133" },
3569 { AR_RAD5122_SREV_MAJOR, "5122" },
3570 { AR_RAD2133_SREV_MAJOR, "2133" },
3571 { AR_RAD2122_SREV_MAJOR, "2122" }
3572 };
3573
3574 /*
3575 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
3576 */
3577 static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
3578 {
3579 int i;
3580
3581 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
3582 if (ath_mac_bb_names[i].version == mac_bb_version) {
3583 return ath_mac_bb_names[i].name;
3584 }
3585 }
3586
3587 return "????";
3588 }
3589
3590 /*
3591 * Return the RF name. "????" is returned if the RF is unknown.
3592 * Used for devices with external radios.
3593 */
3594 static const char *ath9k_hw_rf_name(u16 rf_version)
3595 {
3596 int i;
3597
3598 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
3599 if (ath_rf_names[i].version == rf_version) {
3600 return ath_rf_names[i].name;
3601 }
3602 }
3603
3604 return "????";
3605 }
3606
3607 void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
3608 {
3609 int used;
3610
3611 /* chipsets >= AR9280 are single-chip */
3612 if (AR_SREV_9280_10_OR_LATER(ah)) {
3613 used = snprintf(hw_name, len,
3614 "Atheros AR%s Rev:%x",
3615 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3616 ah->hw_version.macRev);
3617 }
3618 else {
3619 used = snprintf(hw_name, len,
3620 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
3621 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3622 ah->hw_version.macRev,
3623 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
3624 AR_RADIO_SREV_MAJOR)),
3625 ah->hw_version.phyRev);
3626 }
3627
3628 hw_name[used] = '\0';
3629 }
3630 EXPORT_SYMBOL(ath9k_hw_name);
3631
3632 /* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
3633 static void ar9002_hw_attach_ops(struct ath_hw *ah)
3634 {
3635 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
3636 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
3637
3638 priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
3639 priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
3640 priv_ops->macversion_supported = ar9002_hw_macversion_supported;
3641
3642 ops->config_pci_powersave = ar9002_hw_configpcipowersave;
3643
3644 ar5008_hw_attach_phy_ops(ah);
3645 if (AR_SREV_9280_10_OR_LATER(ah))
3646 ar9002_hw_attach_phy_ops(ah);
3647
3648 ar9002_hw_attach_mac_ops(ah);
3649 }
3650
3651 /* Sets up the AR9003 hardware family callbacks */
3652 static void ar9003_hw_attach_ops(struct ath_hw *ah)
3653 {
3654 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
3655
3656 priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
3657 priv_ops->macversion_supported = ar9003_hw_macversion_supported;
3658
3659 ar9003_hw_attach_phy_ops(ah);
3660
3661 ar9003_hw_attach_mac_ops(ah);
3662 }