drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
        .band = IEEE80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}
/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
 * in 5 MHz steps; we only support the channels we know we
 * have calibration data for on all cards, so this table can
 * stay static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; we only support the channels we know we
 * have calibration data for on all cards, so this table can
 * stay static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

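/*
 * Bitrates below are in units of 100 Kbit/s.  The first four entries
 * are the 2.4 GHz CCK rates (1, 2, 5.5 and 11 Mbit/s); the 5 GHz band
 * is registered with ath9k_legacy_rates + 4 further down to skip them.
 */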
static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
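/* Throughput thresholds (Kbit/s) and the LED blink interval (ms) used for each. */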
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
        { .throughput = 0 * 1024, .blink_time = 334 },
        { .throughput = 1 * 1024, .blink_time = 260 },
        { .throughput = 5 * 1024, .blink_time = 220 },
        { .throughput = 10 * 1024, .blink_time = 190 },
        { .throughput = 20 * 1024, .blink_time = 170 },
        { .throughput = 50 * 1024, .blink_time = 150 },
        { .throughput = 70 * 1024, .blink_time = 130 },
        { .throughput = 100 * 1024, .blink_time = 110 },
        { .throughput = 200 * 1024, .blink_time = 80 },
        { .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required as the FIFO on these devices can sanely accept only
 * two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

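/*
 * Read-modify-write: __ath9k_reg_rmw() does the raw register update,
 * ath9k_reg_rmw() wraps it with the serializing lock when required.
 */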
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
{
        u32 val;

        val = ioread32(sc->mem + reg_offset);
        val &= ~clr;
        val |= set;
        iowrite32(val, sc->mem + reg_offset);

        return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        unsigned long uninitialized_var(flags);
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

        return val;
}

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
                max_streams = 1;
        else if (AR_SREV_9462(ah))
                max_streams = 2;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

        ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
                tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

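        /*
         * Each byte of mcs.rx_mask covers eight MCS indices, so setting
         * 0xff per receive chain advertises MCS 0-7 for one stream,
         * 8-15 for two, and so on.
         */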
        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
        int ret;

        ret = ath_reg_notifier_apply(wiphy, request, reg);

        /* Set tx power */
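        /*
         * mac80211's max_power is in dBm while the driver tracks the
         * limit in 0.5 dBm units, hence the factor of two.
         */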
        if (ah->curchan) {
                sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
                ath9k_ps_wakeup(sc);
                ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
                ath9k_ps_restore(sc);
        }

        return ret;
}

/*
 * Allocate both the DMA descriptor structure and the buffers it
 * contains. These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_err(common, "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

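                /*
                 * The extra space reserved for skipped descriptors may
                 * itself spill onto further 4K pages, so iterate until
                 * no additional descriptors need to be skipped.
                 */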
                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
        int i = 0;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < WME_NUM_AC; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
                sc->tx.txq_map[i]->mac80211_qnum = i;
        }
        return 0;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

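        /*
         * hw_value is used as an index into ah->channels, so the two
         * channel tables together must add up to exactly
         * ATH9K_NUM_CHANNELS entries.
         */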
        BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
                     ARRAY_SIZE(ath9k_5ghz_chantable) !=
                     ATH9K_NUM_CHANNELS);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->reg_ops.read = ath9k_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        atomic_set(&ah->intr_ref_cnt, -1);
        sc->sc_ah = ah;

        if (!pdata) {
                ah->ah_flags |= AH_USE_EEPROM;
                sc->sc_ah->led_pin = -1;
        } else {
                sc->sc_ah->gpio_mask = pdata->gpio_mask;
                sc->sc_ah->gpio_val = pdata->gpio_val;
                sc->sc_ah->led_pin = pdata->led_pin;
                ah->is_clk_25mhz = pdata->is_clk_25mhz;
                ah->get_mac_revision = pdata->get_mac_revision;
                ah->external_reset = pdata->external_reset;
        }

        common = ath9k_hw_common(ah);
        common->ops = &ah->reg_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;
        spin_lock_init(&common->cc_lock);

        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
        spin_lock_init(&sc->nodes_lock);
        INIT_LIST_HEAD(&sc->nodes);
#endif
#ifdef CONFIG_ATH9K_MAC_DEBUG
        spin_lock_init(&sc->debug.samp_lock);
#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
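        /* ath_read_cachesize() reports the size in 32-bit words. */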
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        if (pdata && pdata->macaddr)
                memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_hw_deinit(ah);
err_hw:

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
        int i;

        sband = &sc->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
                ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
        }
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

        ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
                return;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}


void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

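        /* One bit per chain: BIT(n) - 1 yields a mask of n available antennas. */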
        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

        /* single chain devices with rx diversity */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

        sc->ant_rx = hw->wiphy->available_antennas_rx;
        sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        ath9k_reload_chainmask_settings(sc);

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
        /* must be initialized before ieee80211_register_hw */
        sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
                IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
                ARRAY_SIZE(ath9k_tpt_blink));
#endif

        INIT_WORK(&sc->hw_reset_work, ath_reset_work);
        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        error = ath9k_init_debug(ah);
        if (error) {
                ath_err(common, "Unable to create debugfs files\n");
                goto error_world;
        }

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        ath9k_deinit_btcoex(sc);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_hw_deinit(sc->sc_ah);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        ath9k_ps_restore(sc);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control "
                        "algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_rate_unregister;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

err_pci_exit:
        ath_pci_exit();

err_rate_unregister:
        ath_rate_control_unregister();
err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        is_ath9k_unloaded = true;
        ath_ahb_exit();
        ath_pci_exit();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);