/*
 * ath9k: Add new file init.c
 * Source: deliverable/linux.git — drivers/net/wireless/ath/ath9k/init.c
 * (commit 55624204)
 */
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
/* Driver short name, printed by the module-exit message. */
static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * "debug" module parameter: debug mask copied into ath_common's
 * debug_mask in ath_init_softc().  Permission 0 -> not visible in
 * sysfs, settable only at load time.
 */
static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

/*
 * "nohwcrypt" module parameter: non-zero disables hardware encryption.
 * Read-only in sysfs (0444); consulted in ath_set_hw_capab() when
 * deciding whether to advertise IEEE80211_HW_MFP_CAPABLE.
 */
int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
33
/* We use the hw_value as an index into our private channel structure */

/*
 * 2.4 GHz channel table entry.  The band is now set explicitly for
 * symmetry with CHAN5G below; IEEE80211_BAND_2GHZ is the enum's zero
 * value, so this does not change the initialized contents.
 */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
41
/*
 * 5 GHz channel table entry: band, center frequency in MHz, private
 * hw_value index, and a default max power of 20 dBm.
 */
#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
48
/* Some 2 GHz radios are actually tunable on 2312-2732
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
/* hw_value indices 0-13 here; the 5 GHz table below continues at 14. */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
69
/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static */
/* hw_value indices continue from the 2 GHz table above (14-37). */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
104
/*
 * Atheros hardware rate code addition for short preamble: when the rate
 * supports a short preamble, hw_value_short is the hw rate code with
 * bit 0x04 set, otherwise 0.
 *
 * Fix: macro arguments are now fully parenthesized in the expansion so
 * that passing compound expressions cannot mis-bind ("premble" typo in
 * the comment also corrected).
 */
#define SHPCHECK(__hw_rate, __flags) \
	(((__flags) & IEEE80211_RATE_SHORT_PREAMBLE) ? ((__hw_rate) | 0x04) : 0)

/* Build one ieee80211_rate entry (bitrate in 100 kbps units). */
#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}
115
/*
 * Legacy (non-HT) rate table shared by both bands; .bitrate is in
 * 100 kbps units (10 == 1 Mbps).  The first four entries are the
 * CCK rates (1/2/5.5/11 Mbps); ath_init_softc() registers the 5 GHz
 * band with "ath9k_legacy_rates + 4" so it only sees the OFDM rates.
 */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
130
131static void ath9k_uninit_hw(struct ath_softc *sc);
132
133/*
134 * Read and write, they both share the same lock. We do this to serialize
135 * reads and writes on Atheros 802.11n PCI devices only. This is required
136 * as the FIFO on these devices can only accept sanely 2 requests.
137 */
138
139static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
140{
141 struct ath_hw *ah = (struct ath_hw *) hw_priv;
142 struct ath_common *common = ath9k_hw_common(ah);
143 struct ath_softc *sc = (struct ath_softc *) common->priv;
144
145 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
146 unsigned long flags;
147 spin_lock_irqsave(&sc->sc_serial_rw, flags);
148 iowrite32(val, sc->mem + reg_offset);
149 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
150 } else
151 iowrite32(val, sc->mem + reg_offset);
152}
153
154static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
155{
156 struct ath_hw *ah = (struct ath_hw *) hw_priv;
157 struct ath_common *common = ath9k_hw_common(ah);
158 struct ath_softc *sc = (struct ath_softc *) common->priv;
159 u32 val;
160
161 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
162 unsigned long flags;
163 spin_lock_irqsave(&sc->sc_serial_rw, flags);
164 val = ioread32(sc->mem + reg_offset);
165 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
166 } else
167 val = ioread32(sc->mem + reg_offset);
168 return val;
169}
170
/*
 * Register accessors handed to the shared ath layer; all MMIO goes
 * through the serializing wrappers above.
 */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
175
176/**************************/
177/* Initialization */
178/**************************/
179
/*
 * Fill in the HT (802.11n) capabilities advertised to mac80211 for one
 * band: 40 MHz support, SM power save, short GI at 40 MHz, and an MCS
 * set derived from the configured tx/rx chainmasks.
 */
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 tx_streams, rx_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	/*
	 * !(mask & (mask - 1)) is true when at most one bit is set in
	 * the chainmask, i.e. a single spatial stream; otherwise assume
	 * two streams.
	 */
	tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
		     1 : 2;
	rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
		     1 : 2;

	/* Asymmetric chains: advertise the tx stream count explicitly. */
	if (tx_streams != rx_streams) {
		ath_print(common, ATH_DBG_CONFIG,
			  "TX streams %d, RX streams: %d\n",
			  tx_streams, rx_streams);
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	/* MCS 0-7 always; MCS 8-15 only with a second rx stream. */
	ht_info->mcs.rx_mask[0] = 0xff;
	if (rx_streams >= 2)
		ht_info->mcs.rx_mask[1] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
217
218static int ath9k_reg_notifier(struct wiphy *wiphy,
219 struct regulatory_request *request)
220{
221 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
222 struct ath_wiphy *aphy = hw->priv;
223 struct ath_softc *sc = aphy->sc;
224 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
225
226 return ath_reg_notifier_apply(wiphy, request, reg);
227}
228
/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 *
 * @dd:    descriptor-DMA bookkeeping structure to fill in
 * @head:  list that receives the nbuf ath_buf entries
 * @name:  queue name, for diagnostics only
 * @nbuf:  number of buffers
 * @ndesc: descriptors per buffer
 *
 * Returns 0 on success or a negative errno; on failure @dd is zeroed.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc)
{
/* Translate a descriptor CPU pointer into its DMA (bus) address. */
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
/* True if a descriptor fetch starting at _daddr would cross a 4 KB page. */
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
/* How many 4 KB pages (hence skipped descriptors) fit in _len bytes. */
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);
	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/*
		 * The extra memory itself spans pages, so iterate until
		 * the added length no longer introduces new pages.
		 */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		};
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	/* Point each buffer at its descriptor block and queue it on head. */
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
335
/*
 * One-time softc/hardware bring-up: locks and tasklets, ath_hw
 * allocation, hardware init, debugfs, key cache reset, tx queue
 * creation, crypto/chainmask capability probing, band/rate tables
 * and bluetooth-coexistence setup.
 *
 * Returns 0 on success or a negative errno; on failure any tx queues
 * created so far and the hardware state are torn down.
 */
static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			  const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int r = 0, i;
	int csz = 0;
	int qnum;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	/* Wire the shared ath layer to this device. */
	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	/* XXX assert csz is non-zero */
	common->cachelsz = csz << 2; /* convert to bytes */

	r = ath9k_hw_init(ah);
	if (r) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to initialize hardware; "
			  "initialization status: %d\n", r);
		goto bad_free_hw;
	}

	if (ath9k_init_debug(ah) < 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto bad_free_hw;
	}

	/* Get the hardware key cache size. */
	common->keymax = ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);

	/* default to MONITOR mode */
	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles reseting
	 * these queues at the needed time.
	 */
	sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		r = -EIO;
		goto bad2;
	}
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		r = -EIO;
		goto bad2;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	/* Mark all hw queues unmapped until the AC queues are created. */
	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		r = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		r = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		r = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		r = -EIO;
		goto bad2;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	/* 11n Capabilities */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = ah->caps.tx_chainmask;
	common->rx_chainmask = ah->caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->rx.defant = ath9k_hw_getdefantenna(ah);

	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}

	/* setup channels and rates */

	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		/* Skip the first four (CCK) rates on 5 GHz. */
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}

	/* Bluetooth coexistence wiring depends on the board scheme. */
	switch (ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			goto bad2;
		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
		ath9k_hw_init_btcoex_hw(ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

bad_free_hw:
	ath9k_uninit_hw(sc);
	return r;
}
585
/*
 * Publish this device's capabilities to mac80211: hw flags, supported
 * interface modes, queue counts, rate-control algorithm and the band
 * structures filled in by ath_init_softc().
 */
void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT;

	/* MFP on AR9160+ hardware, or whenever crypto is done in software. */
	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	/* Hardware supports 10 but we use 4 */
	hw->max_rate_tries = 4;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->rate_control_algorithm = "ath9k_rate_control";

	/* Register whichever bands ath_init_softc() set up. */
	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];
}
625
626/* Device driver core initialization */
627int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
628 const struct ath_bus_ops *bus_ops)
629{
630 struct ieee80211_hw *hw = sc->hw;
631 struct ath_common *common;
632 struct ath_hw *ah;
633 int error = 0, i;
634 struct ath_regulatory *reg;
635
636 dev_dbg(sc->dev, "Attach ATH hw\n");
637
638 error = ath_init_softc(devid, sc, subsysid, bus_ops);
639 if (error != 0)
640 return error;
641
642 ah = sc->sc_ah;
643 common = ath9k_hw_common(ah);
644
645 /* get mac address from hardware and set in mac80211 */
646
647 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
648
649 ath_set_hw_capab(sc, hw);
650
651 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
652 ath9k_reg_notifier);
653 if (error)
654 return error;
655
656 reg = &common->regulatory;
657
658 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
659 if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
660 setup_ht_cap(sc,
661 &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
662 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
663 setup_ht_cap(sc,
664 &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
665 }
666
667 /* initialize tx/rx engine */
668 error = ath_tx_init(sc, ATH_TXBUF);
669 if (error != 0)
670 goto error_attach;
671
672 error = ath_rx_init(sc, ATH_RXBUF);
673 if (error != 0)
674 goto error_attach;
675
676 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
677 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
678 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
679
680 error = ieee80211_register_hw(hw);
681
682 if (!ath_is_world_regd(reg)) {
683 error = regulatory_hint(hw->wiphy, reg->alpha2);
684 if (error)
685 goto error_attach;
686 }
687
688 /* Initialize LED control */
689 ath_init_leds(sc);
690
691 ath_start_rfkill_poll(sc);
692
693 return 0;
694
695error_attach:
696 /* cleanup tx queues */
697 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
698 if (ATH_TXQ_SETUP(sc, i))
699 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
700
701 ath9k_uninit_hw(sc);
702
703 return error;
704}
705
706/*****************************/
707/* De-Initialization */
708/*****************************/
709
/*
 * Release hardware state: tear down debugfs, detach the hal and clear
 * sc->sc_ah so the softc no longer references the freed ath_hw.
 */
static void ath9k_uninit_hw(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;

	/* Callers must not invoke this twice or before attach. */
	BUG_ON(!ah);

	ath9k_exit_debug(ah);
	ath9k_hw_detach(ah);
	sc->sc_ah = NULL;
}
720
/*
 * Common teardown shared by ath_detach() and ath_cleanup(): unregister
 * all wiphys from mac80211, stop LEDs/rfkill, kill tasklets and free
 * the tx queues and btcoex timer.  Does NOT release the hardware
 * itself; callers follow up with ath9k_uninit_hw().
 */
static void ath_clean_core(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	int i = 0;

	/* Make sure the chip is awake for the teardown register traffic. */
	ath9k_ps_wakeup(sc);

	dev_dbg(sc->dev, "Detach ATH hw\n");

	ath_deinit_leds(sc);
	wiphy_rfkill_stop_polling(sc->hw->wiphy);

	/* Unregister and free any secondary (virtual) wiphys first. */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_setpower(sc, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	/* 3-wire btcoex allocates a generic timer; release it if present. */
	if ((sc->btcoex.no_stomp_timer) &&
	    ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
}
761
/*
 * Undo ath_descdma_setup(): return the coherent descriptor memory,
 * reset the buffer list, free the ath_buf array and zero @dd.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
773
/*
 * Detach path that tears down driver state and hardware but leaves
 * bus resources and the ieee80211_hw allocation to the caller
 * (contrast with ath_cleanup() below).
 */
void ath_detach(struct ath_softc *sc)
{
	ath_clean_core(sc);
	ath9k_uninit_hw(sc);
}
779
/*
 * Full device removal: common teardown plus the IRQ, bus mapping,
 * secondary-wiphy array and the ieee80211_hw itself.  Hardware state
 * is released last because ath_bus_cleanup() needs common, which is
 * embedded in the ath_hw.
 */
void ath_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	ath_clean_core(sc);
	free_irq(sc->irq, sc);
	ath_bus_cleanup(common);
	kfree(sc->sec_wiphy);
	ieee80211_free_hw(sc->hw);

	ath9k_uninit_hw(sc);
}
793
794/************************/
795/* Module Hooks */
796/************************/
797
/*
 * Module entry point: register the rate-control algorithm, create the
 * debugfs root, then probe the PCI and AHB buses.  Uses the classic
 * goto-ladder so each failure unwinds exactly what succeeded before it.
 */
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_remove_root:
	ath9k_debug_remove_root();
 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
846module_init(ath9k_init);
847
/* Module exit: unwind ath9k_init() registrations in reverse order. */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
856module_exit(ath9k_exit);
/* End of drivers/net/wireless/ath/ath9k/init.c */