drivers/net: Add module.h to drivers who were implicitly using it
drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps. We only list the channels for which we have
 * calibration data on all cards, so this table can stay static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
 * in 5 MHz steps. We only list the channels for which we have
 * calibration data on all cards, so this table can stay static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Register reads and writes share the same lock. We do this to
 * serialize reads and writes on Atheros 802.11n PCI devices only.
 * This is required as the FIFO on these devices can sanely accept
 * only two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

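/*
 * Read-modify-write a register: clear the bits in 'clr', set the bits
 * in 'set' and return the resulting value. ath9k_reg_rmw() takes the
 * serial read/write lock when the hardware requires serialized
 * register access.
 */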
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

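/*
 * Fill in the HT capabilities advertised to mac80211: channel width,
 * short GI, A-MPDU parameters, STBC and the supported MCS set derived
 * from the TX/RX chainmasks of the chip.
 */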
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

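/*
 * Regulatory notifier: called by cfg80211 when the regulatory domain
 * changes; forwards the request to the shared ath regulatory code.
 */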
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * Allocate both the DMA descriptor memory and the ath_buf structures
 * that point into it; together they hold the descriptors used by the
 * hardware.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}

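/*
 * Set up Bluetooth coexistence according to the scheme supported by
 * the hardware: none, 2-wire, or 3-wire. The 3-wire scheme additionally
 * sets up the coex timer and ties coexistence to the BE TX queue.
 */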
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

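/*
 * Create the hardware TX queues: the beacon queue, the CAB
 * (content-after-beacon) queue and one data queue per WME access
 * category.
 */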
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}

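/*
 * Copy the static channel tables for the bands the hardware supports
 * and attach the legacy bitrate table; the 5 GHz band skips the four
 * CCK rates at the start of that table.
 */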
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

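/*
 * Miscellaneous one-time initialization: ANI calibration timer,
 * TX power limit, aggregation flags, default antenna, BSSID mask,
 * slot time and beacon slots.
 */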
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

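/*
 * Allocate and initialize the ath_hw instance, wire up register ops
 * and platform data, set up locks and tasklets, initialize the
 * hardware, and create the TX queues, BT coexistence state and the
 * channel/rate tables.
 */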
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

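/*
 * Walk all channels in the band, temporarily making each one the
 * current channel, and apply the maximum TX power limit so that
 * per-channel power limits are initialized.
 */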
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}

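/*
 * Advertise the driver's capabilities to mac80211: hardware flags,
 * supported interface modes, queue and rate limits, antenna
 * configuration, the rate control algorithm and the supported bands.
 */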
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

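/*
 * Top-level device bring-up, called by the bus glue (PCI/AHB): set up
 * the softc, capabilities, regulatory, TX/RX DMA and LED trigger, then
 * register the device with mac80211 and debugfs.
 */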
int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

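/*
 * Tear down everything allocated by ath9k_init_softc(): channel
 * tables, the btcoex no-stomp timer, the TX queues and the ath_hw
 * instance itself.
 */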
static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

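/*
 * Full device teardown: stop rfkill polling and the LEDs, unregister
 * from mac80211, release the RX/TX DMA state and free the softc.
 */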
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

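/*
 * Free the coherent descriptor memory and the buffer array allocated
 * by ath_descdma_setup() and reset the descdma state.
 */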
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

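/*
 * Module entry point: register the rate control algorithm, then the
 * PCI and AHB bus drivers, unwinding in reverse order on failure.
 */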
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);