drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
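
/*
 * Note (added for clarity, not in the original file): the options above are
 * ordinary module parameters and can be set at load time, e.g.
 * "modprobe ath9k nohwcrypt=1 btcoex_enable=1".  The "debug" value is a
 * bitmask of the ATH_DBG_* flags (ATH_DBG_CONFIG and friends) consumed by
 * ath_dbg() throughout the driver.
 */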

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
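
/*
 * Worked example (added for clarity, not in the original file): for the
 * 2 Mbps CCK entry above, RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE)
 * expands to .bitrate = 20 (units of 100 kbps), .hw_value = 0x1a and, via
 * SHPCHECK(), .hw_value_short = 0x1a | 0x04 = 0x1e; the short-preamble
 * variant of a rate is simply its hardware rate code with bit 2 set.
 * Rates without IEEE80211_RATE_SHORT_PREAMBLE get hw_value_short = 0.
 */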

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * register accesses on Atheros 802.11n PCI devices only. This is
 * required because the FIFO on these devices can only sanely accept
 * two outstanding requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

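/*
 * Lock-free read-modify-write helper (comment added for clarity, not in
 * the original file): this performs the actual register update; the
 * wrapper ath9k_reg_rmw() below takes sc_serial_rw around it when the
 * hardware requires serialized register access.
 */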
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}
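
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * once ath9k_init_softc() below has wired these accessors into ah->reg_ops,
 * any register update routed through them picks the serialized or lock-free
 * path automatically.  The register offset and bit values here are
 * placeholders only.
 */
#if 0
static void example_reg_update(struct ath_hw *ah)
{
	/* set bit 0 and clear bit 1 of a (placeholder) register at 0x9800 */
	ath9k_reg_rmw(ah, 0x9800, 0x1, 0x2);
}
#endif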

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

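	/*
	 * Example (comment added for clarity, not in the original file):
	 * each byte of rx_mask covers eight MCS indexes of one spatial
	 * stream, so a 2-stream device ends up with rx_mask[0] and
	 * rx_mask[1] set to 0xff, i.e. MCS 0-15 supported on receive.
	 */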
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor memory and the ath_buf
 * structures that point into it; together they hold the descriptors used
 * by the hardware.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
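	/*
	 * Added note (not in the original file): when the hardware cannot
	 * split a DMA transaction across a 4 KB boundary (no
	 * ATH9K_HW_CAP_4KB_SPLITTRANS), the loop below grows dd_desc_len by
	 * roughly one descriptor per 4 KB page it spans, and the placement
	 * loop further down then skips any descriptor whose DMA address
	 * would straddle a boundary.  For example, a request for 512
	 * descriptors of 24 bytes (12 KB) gains roughly three extra
	 * descriptors' worth of padding to cover the skipped slots.
	 */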
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}

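/*
 * Comment added for clarity (not in the original file): copy the static
 * channel tables into per-device band structures.  Both bands share
 * ath9k_legacy_rates; the 5 GHz band starts at ath9k_legacy_rates + 4,
 * skipping the four CCK entries (1, 2, 5.5 and 11 Mbps) that are only
 * valid at 2.4 GHz.
 */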
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initialize the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

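/*
 * Comment added for clarity (not in the original file): walk every channel
 * in the band once, program the maximum allowed transmit power, and record
 * the resulting regulatory limit in the mac80211 channel.  max_power_level
 * is kept in half-dBm steps, hence the division by two below.
 */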
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}


void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
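	/*
	 * Example (comment added for clarity, not in the original file):
	 * BIT(n) - 1 turns a chain count into a chain bitmask, so a
	 * two-chain device advertises available_antennas_rx/tx = 0x3.
	 */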

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

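/*
 * Comment added for clarity (not in the original file): module init
 * registers the rate control algorithm first, then the PCI and AHB bus
 * glue; on failure, the pieces already registered are torn down in reverse
 * order, mirroring ath9k_exit() below.
 */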
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);