ar9170: remove unneeded flush_workqueue()
[deliverable/linux.git] / drivers / net / wireless / ath / ar9170 / main.c
1 /*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <net/mac80211.h>
44 #include "ar9170.h"
45 #include "hw.h"
46 #include "cmd.h"
47
/* Module parameters, world-readable (S_IRUGO), fixed at load time. */

/* non-zero => disable hardware crypto offload (fall back to software).
 * NOTE(review): variable is int but registered with type "bool" -
 * newer kernels warn about this mismatch; confirm before forward-porting. */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/* non-zero => advertise HT capabilities (enables MPDU aggregation). */
static int modparam_ht;
module_param_named(ht, modparam_ht, bool, S_IRUGO);
MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
55
/*
 * Legacy (CCK + OFDM) bitrate table.
 *
 * hw_value packs two fields: bits 0-3 hold the hardware rate code
 * (_hw_rate), bits 4-7 hold a tx power lookup index (_txpidx).
 * _bitrate is in units of 100 kbit/s, per mac80211 convention.
 */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

static struct ieee80211_rate __ar9170_ratetable[] = {
	/* CCK rates (2 GHz only) - first four entries */
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	/* OFDM rates - shared by both bands */
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2 GHz uses the full table; 5 GHz skips the four CCK entries. */
#define ar9170_g_ratetable	(__ar9170_ratetable + 0)
#define ar9170_g_ratetable_size	12
#define ar9170_a_ratetable	(__ar9170_ratetable + 4)
#define ar9170_a_ratetable_size	8
82
/*
 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

static struct ieee80211_channel ar9170_2ghz_chantable[] = {
	CHAN(2412,  0),
	CHAN(2417,  1),
	CHAN(2422,  2),
	CHAN(2427,  3),
	CHAN(2432,  4),
	CHAN(2437,  5),
	CHAN(2442,  6),
	CHAN(2447,  7),
	CHAN(2452,  8),
	CHAN(2457,  9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

/*
 * 5 GHz table. Includes 4.9 GHz entries, and the 5170-5230 MHz
 * channels are appended at the end (hw_value 45-48) rather than in
 * frequency order - the hw_value index is what matters, not ordering.
 */
static struct ieee80211_channel ar9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
148
/*
 * HT capabilities advertised for both bands: 40 MHz + SGI40,
 * greenfield, A-MPDU factor 3 / density 6; rx MCS 0-15 plus MCS 32
 * (the 0x1 in rx_mask[4]), 300 Mbit/s max rx.
 */
#define AR9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_GRN_FLD |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= 3,						\
	.ampdu_density	= 6,						\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}

/* Band descriptors registered with mac80211. */
static struct ieee80211_supported_band ar9170_band_2GHz = {
	.channels	= ar9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_2ghz_chantable),
	.bitrates	= ar9170_g_ratetable,
	.n_bitrates	= ar9170_g_ratetable_size,
	.ht_cap		= AR9170_HT_CAP,
};

static struct ieee80211_supported_band ar9170_band_5GHz = {
	.channels	= ar9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_5ghz_chantable),
	.bitrates	= ar9170_a_ratetable,
	.n_bitrates	= ar9170_a_ratetable_size,
	.ht_cap		= AR9170_HT_CAP,
};
182
static void ar9170_tx(struct ar9170 *ar);
static bool ar9170_tx_ampdu(struct ar9170 *ar);

/* Extract the 12-bit sequence number from an 802.11 header. */
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
	return le16_to_cpu(hdr->seq_ctrl) >> 4;
}

/* Sequence number of a queued frame; the skb starts with the
 * ar9170_tx_control header, the 802.11 frame follows in frame_data. */
static inline u16 ar9170_get_seq(struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	return ar9170_get_seq_h((void *) txc->frame_data);
}

/* QoS TID of a queued frame - only meaningful for QoS data frames
 * (ieee80211_get_qos_ctl assumes the QoS control field is present). */
static inline u16 ar9170_get_tid(struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) txc->frame_data;

	return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
}

/* 802.11 sequence numbers wrap at 2^12. */
#define GET_NEXT_SEQ(seq)	((seq + 1) & 0x0fff)
#define GET_NEXT_SEQ_FROM_SKB(skb)	(GET_NEXT_SEQ(ar9170_get_seq(skb)))
207
#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
/* Log one queued tx frame's control header, MAC header and timeout. */
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
	struct ieee80211_hdr *hdr = (void *) txc->frame_data;

	printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
			  "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
	       ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
	       le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
	       jiffies_to_msecs(arinfo->timeout - jiffies));
}

/* Dump every frame in @queue. Caller must already hold queue->lock. */
static void __ar9170_dump_txqueue(struct ar9170 *ar,
				  struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	int i = 0;

	printk(KERN_DEBUG "---[ cut here ]---\n");
	printk(KERN_DEBUG "%s: %d entries in queue.\n",
	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));

	skb_queue_walk(queue, skb) {
		printk(KERN_DEBUG "index:%d => \n", i++);
		ar9170_print_txheader(ar, skb);
	}
	/* cross-check the walk count against the queue's own accounting */
	if (i != skb_queue_len(queue))
		printk(KERN_DEBUG "WARNING: queue frame counter "
		       "mismatch %d != %d\n", skb_queue_len(queue), i);
	printk(KERN_DEBUG "---[ end ]---\n");
}
#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
244
#ifdef AR9170_QUEUE_DEBUG
/* Locked wrapper around __ar9170_dump_txqueue(). */
static void ar9170_dump_txqueue(struct ar9170 *ar,
				struct sk_buff_head *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	__ar9170_dump_txqueue(ar, queue);
	spin_unlock_irqrestore(&queue->lock, flags);
}
#endif /* AR9170_QUEUE_DEBUG */
256
#ifdef AR9170_QUEUE_STOP_DEBUG
/* Log per-QoS-queue limit/len/waitack/stopped state for all tx queues. */
static void __ar9170_dump_txstats(struct ar9170 *ar)
{
	int i;

	printk(KERN_DEBUG "%s: QoS queue stats\n",
	       wiphy_name(ar->hw->wiphy));

	for (i = 0; i < __AR9170_NUM_TXQ; i++)
		printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d "
		       " stopped:%d\n", wiphy_name(ar->hw->wiphy), i,
		       ar->tx_stats[i].limit, ar->tx_stats[i].len,
		       skb_queue_len(&ar->tx_status[i]),
		       ieee80211_queue_stopped(ar->hw, i));
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
273
#ifdef AR9170_TXAGG_DEBUG
/* Dump the pending A-MPDU tx_status queue (takes its lock). */
static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
{
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
	printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
	       wiphy_name(ar->hw->wiphy));
	__ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
	spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
}

#endif /* AR9170_TXAGG_DEBUG */
287
/*
 * Move every frame whose driver timeout has elapsed from the head of
 * @queue into @bin. @queue's own lock is taken here; the caller must
 * guarantee exclusive access for the _bin_ queue.
 */
static void ar9170_recycle_expired(struct ar9170 *ar,
				   struct sk_buff_head *queue,
				   struct sk_buff_head *bin)
{
	struct sk_buff *skb, *old = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	while ((skb = skb_peek(queue))) {
		struct ieee80211_tx_info *txinfo;
		struct ar9170_tx_info *arinfo;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;

		if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
			       "recycle \n", wiphy_name(ar->hw->wiphy),
			       jiffies, arinfo->timeout);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			__skb_unlink(skb, queue);
			__skb_queue_tail(bin, skb);
		} else {
			/* first still-fresh frame ends the scan - frames
			 * behind it were queued later (later timeouts) */
			break;
		}

		/* safety net: skb_peek() must not return the frame we just
		 * unlinked; if it does, the queue is corrupt - bail out */
		if (unlikely(old == skb)) {
			/* bail out - queue is shot. */

			WARN_ON(1);
			break;
		}
		old = skb;
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}
327
/*
 * Report a firmware tx status code for @skb back to mac80211.
 * Strips the driver's tx control header before handing the skb over;
 * the skb is consumed by ieee80211_tx_status_irqsafe().
 */
static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			     u16 tx_status)
{
	struct ieee80211_tx_info *txinfo;
	unsigned int retries = 0;

	txinfo = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(txinfo);

	switch (tx_status) {
	case AR9170_TX_STATUS_RETRY:
		retries = 2;
		/* fall through - a retried frame was still ACKed */
	case AR9170_TX_STATUS_COMPLETE:
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		break;

	case AR9170_TX_STATUS_FAILED:
		retries = ar->hw->conf.long_frame_max_tx_count;
		break;

	default:
		printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
		       wiphy_name(ar->hw->wiphy), tx_status);
		break;
	}

	txinfo->status.rates[0].count = retries + 1;
	skb_pull(skb, sizeof(struct ar9170_tx_control));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}
358
/*
 * The firmware gives no per-MPDU status for aggregated frames, so
 * fake it: drain overflow/expired entries from the A-MPDU status
 * queue, report them all to mac80211 as ACKed, and wake every tx
 * queue a reported frame came from.
 */
static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
{
	struct sk_buff_head success;
	struct sk_buff *skb;
	unsigned int i;
	unsigned long queue_bitmap = 0;

	skb_queue_head_init(&success);

	/* cap the backlog: oldest entries beyond the limit are reported now */
	while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
		__skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));

	ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);

#ifdef AR9170_TXAGG_DEBUG
	printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
	       wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
	__ar9170_dump_txqueue(ar, &success);
#endif /* AR9170_TXAGG_DEBUG */

	while ((skb = __skb_dequeue(&success))) {
		struct ieee80211_tx_info *txinfo;

		/* remember which queues to wake afterwards */
		queue_bitmap |= BIT(skb_get_queue_mapping(skb));

		txinfo = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(txinfo);

		/* optimistically claim success - see function comment */
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		txinfo->status.rates[0].count = 1;

		skb_pull(skb, sizeof(struct ar9170_tx_control));
		ieee80211_tx_status_irqsafe(ar->hw, skb);
	}

	for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
#ifdef AR9170_QUEUE_STOP_DEBUG
		printk(KERN_DEBUG "%s: wake queue %d\n",
		       wiphy_name(ar->hw->wiphy), i);
		__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
		ieee80211_wake_queue(ar->hw, i);
	}

	/* something was reported => there may be room to transmit again */
	if (queue_bitmap)
		ar9170_tx(ar);
}
406
/*
 * Completion path for an aggregated frame: park it on the fake
 * A-MPDU status queue with a block-ack timeout, then try to push
 * the next aggregate once no more frames are in flight.
 */
static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;

	arinfo->timeout = jiffies +
			  msecs_to_jiffies(AR9170_BA_TIMEOUT);

	skb_queue_tail(&ar->tx_status_ampdu, skb);
	ar9170_tx_fake_ampdu_status(ar);
	ar->tx_ampdu_pending--;

	/* last in-flight frame done => schedule the next aggregate */
	if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending)
		ar9170_tx_ampdu(ar);
}
422
/*
 * USB completion callback for a transmitted frame. Updates the
 * per-queue accounting, then routes the skb by its driver flags:
 * A-MPDU frames go to the fake block-ack machinery, ACK-waiting
 * frames are parked until the firmware's tx status report arrives,
 * no-ACK frames are completed immediately.
 */
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
	unsigned int queue = skb_get_queue_mapping(skb);
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_stats_lock, flags);
	ar->tx_stats[queue].len--;

	/* nothing left pending for this queue => let mac80211 refill it */
	if (skb_queue_empty(&ar->tx_pending[queue])) {
#ifdef AR9170_QUEUE_STOP_DEBUG
		printk(KERN_DEBUG "%s: wake queue %d\n",
		       wiphy_name(ar->hw->wiphy), queue);
		__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
		ieee80211_wake_queue(ar->hw, queue);
	}
	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

	if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
		ar9170_tx_ampdu_callback(ar, skb);
	} else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
		/* await the firmware's tx status report (see 0xc1 event) */
		arinfo->timeout = jiffies +
				  msecs_to_jiffies(AR9170_TX_TIMEOUT);

		skb_queue_tail(&ar->tx_status[queue], skb);
	} else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
		/* no ACK expected - report with maximum retry count */
		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
	} else {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: unsupported frame flags!\n",
		       wiphy_name(ar->hw->wiphy));
		ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
		dev_kfree_skb_any(skb);
	}

	/* hardware queue drained but frames still pending => kick tx */
	if (!ar->tx_stats[queue].len &&
	    !skb_queue_empty(&ar->tx_pending[queue])) {
		ar9170_tx(ar);
	}
}
466
/*
 * Find and unlink the queued frame a firmware status report most
 * likely refers to, matching on destination MAC (@mac, NULL = any)
 * and tx rate (@rate, AR9170_TX_INVALID_RATE = any).
 * Returns the unlinked skb, or NULL if nothing matched.
 */
static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
					     const u8 *mac,
					     struct sk_buff_head *queue,
					     const u32 rate)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * Unfortunately, the firmware does not tell to which (queued) frame
	 * this transmission status report belongs to.
	 *
	 * So we have to make risky guesses - with the scarce information
	 * the firmware provided (-> destination MAC, and phy_control) -
	 * and hope that we picked the right one...
	 */

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk(queue, skb) {
		struct ar9170_tx_control *txc = (void *) skb->data;
		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
		u32 r;

		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
			       wiphy_name(ar->hw->wiphy), mac,
			       ieee80211_get_DA(hdr));
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			continue;
		}

		/* recover the rate index from the frame's phy_control word */
		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
		    AR9170_TX_PHY_MCS_SHIFT;

		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
			       wiphy_name(ar->hw->wiphy), rate, r);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			continue;
		}

		/* match - unlink under the lock and hand the skb out */
		__skb_unlink(skb, queue);
		spin_unlock_irqrestore(&queue->lock, flags);
		return skb;
	}

#ifdef AR9170_QUEUE_DEBUG
	printk(KERN_ERR "%s: ESS:[%pM] does not have any "
	       "outstanding frames in queue.\n",
	       wiphy_name(ar->hw->wiphy), mac);
	__ar9170_dump_txqueue(ar, queue);
#endif /* AR9170_QUEUE_DEBUG */
	spin_unlock_irqrestore(&queue->lock, flags);

	return NULL;
}
527
/*
 * Process a firmware block-ack event: complete up to @count pending
 * A-MPDU frames that were sent at rate @r. Note the frames are
 * reported with a single-try rate count regardless of the "failed"
 * count the firmware gave us (see FIXME below).
 */
static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;

	while (count) {
		skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
		if (!skb)
			break;

		txinfo = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(txinfo);

		/* FIXME: maybe more ? */
		txinfo->status.rates[0].count = 1;

		skb_pull(skb, sizeof(struct ar9170_tx_control));
		ieee80211_tx_status_irqsafe(ar->hw, skb);
		count--;
	}

#ifdef AR9170_TXAGG_DEBUG
	if (count) {
		printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
		       "suitable frames left in tx_status queue.\n",
		       wiphy_name(ar->hw->wiphy), count);

		ar9170_dump_tx_status_ampdu(ar);
	}
#endif /* AR9170_TXAGG_DEBUG */
}
559
/*
 * This worker tries to keep the tx_status queues maintained,
 * so we can guarantee that incoming tx_status reports are
 * actually for a pending frame.
 */

static void ar9170_tx_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_janitor.work);
	struct sk_buff_head waste;
	unsigned int i;
	bool resched = false;

	/* device already stopped/unplugged - nothing to clean up */
	if (unlikely(!IS_STARTED(ar)))
		return ;

	skb_queue_head_init(&waste);

	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
		       wiphy_name(ar->hw->wiphy), i);
		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */

		/* expired frames are dropped outright, not reported */
		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
		skb_queue_purge(&waste);

		if (!skb_queue_empty(&ar->tx_status[i]) ||
		    !skb_queue_empty(&ar->tx_pending[i]))
			resched = true;
	}

	ar9170_tx_fake_ampdu_status(ar);

	/* keep running as long as any queue still has entries */
	if (resched)
		queue_delayed_work(ar->hw->workqueue,
				   &ar->tx_janitor,
				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
603
604 void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
605 {
606 struct ar9170_cmd_response *cmd = (void *) buf;
607
608 if ((cmd->type & 0xc0) != 0xc0) {
609 ar->callback_cmd(ar, len, buf);
610 return;
611 }
612
613 /* hardware event handlers */
614 switch (cmd->type) {
615 case 0xc1: {
616 /*
617 * TX status notification:
618 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
619 *
620 * XX always 81
621 * YY always 00
622 * M1-M6 is the MAC address
623 * R1-R4 is the transmit rate
624 * S1-S2 is the transmit status
625 */
626
627 struct sk_buff *skb;
628 u32 phy = le32_to_cpu(cmd->tx_status.rate);
629 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
630 AR9170_TX_PHY_QOS_SHIFT;
631 #ifdef AR9170_QUEUE_DEBUG
632 printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
633 wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
634 #endif /* AR9170_QUEUE_DEBUG */
635
636 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
637 &ar->tx_status[q],
638 AR9170_TX_INVALID_RATE);
639 if (unlikely(!skb))
640 return ;
641
642 ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
643 break;
644 }
645
646 case 0xc0:
647 /*
648 * pre-TBTT event
649 */
650 if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
651 queue_work(ar->hw->workqueue, &ar->beacon_work);
652 break;
653
654 case 0xc2:
655 /*
656 * (IBSS) beacon send notification
657 * bytes: 04 c2 XX YY B4 B3 B2 B1
658 *
659 * XX always 80
660 * YY always 00
661 * B1-B4 "should" be the number of send out beacons.
662 */
663 break;
664
665 case 0xc3:
666 /* End of Atim Window */
667 break;
668
669 case 0xc4:
670 /* BlockACK bitmap */
671 break;
672
673 case 0xc5:
674 /* BlockACK events */
675 ar9170_handle_block_ack(ar,
676 le16_to_cpu(cmd->ba_fail_cnt.failed),
677 le16_to_cpu(cmd->ba_fail_cnt.rate));
678 ar9170_tx_fake_ampdu_status(ar);
679 break;
680
681 case 0xc6:
682 /* Watchdog Interrupt */
683 break;
684
685 case 0xc9:
686 /* retransmission issue / SIFS/EIFS collision ?! */
687 break;
688
689 /* firmware debug */
690 case 0xca:
691 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
692 break;
693 case 0xcb:
694 len -= 4;
695
696 switch (len) {
697 case 1:
698 printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
699 *((char *)buf + 4));
700 break;
701 case 2:
702 printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n",
703 le16_to_cpup((__le16 *)((char *)buf + 4)));
704 break;
705 case 4:
706 printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n",
707 le32_to_cpup((__le32 *)((char *)buf + 4)));
708 break;
709 case 8:
710 printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n",
711 (unsigned long)le64_to_cpup(
712 (__le64 *)((char *)buf + 4)));
713 break;
714 }
715 break;
716 case 0xcc:
717 print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
718 (char *)buf + 4, len - 4);
719 break;
720
721 default:
722 printk(KERN_INFO "received unhandled event %x\n", cmd->type);
723 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
724 break;
725 }
726 }
727
728 static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
729 {
730 memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
731 ar->rx_mpdu.has_plcp = false;
732 }
733
734 int ar9170_nag_limiter(struct ar9170 *ar)
735 {
736 bool print_message;
737
738 /*
739 * we expect all sorts of errors in promiscuous mode.
740 * don't bother with it, it's OK!
741 */
742 if (ar->sniffer_enabled)
743 return false;
744
745 /*
746 * only go for frequent errors! The hardware tends to
747 * do some stupid thing once in a while under load, in
748 * noisy environments or just for fun!
749 */
750 if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
751 print_message = true;
752 else
753 print_message = false;
754
755 /* reset threshold for "once in a while" */
756 ar->bad_hw_nagger = jiffies + HZ / 4;
757 return print_message;
758 }
759
760 static int ar9170_rx_mac_status(struct ar9170 *ar,
761 struct ar9170_rx_head *head,
762 struct ar9170_rx_macstatus *mac,
763 struct ieee80211_rx_status *status)
764 {
765 u8 error, decrypt;
766
767 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
768 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
769
770 error = mac->error;
771 if (error & AR9170_RX_ERROR_MMIC) {
772 status->flag |= RX_FLAG_MMIC_ERROR;
773 error &= ~AR9170_RX_ERROR_MMIC;
774 }
775
776 if (error & AR9170_RX_ERROR_PLCP) {
777 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
778 error &= ~AR9170_RX_ERROR_PLCP;
779
780 if (!(ar->filter_state & FIF_PLCPFAIL))
781 return -EINVAL;
782 }
783
784 if (error & AR9170_RX_ERROR_FCS) {
785 status->flag |= RX_FLAG_FAILED_FCS_CRC;
786 error &= ~AR9170_RX_ERROR_FCS;
787
788 if (!(ar->filter_state & FIF_FCSFAIL))
789 return -EINVAL;
790 }
791
792 decrypt = ar9170_get_decrypt_type(mac);
793 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
794 decrypt != AR9170_ENC_ALG_NONE)
795 status->flag |= RX_FLAG_DECRYPTED;
796
797 /* ignore wrong RA errors */
798 error &= ~AR9170_RX_ERROR_WRONG_RA;
799
800 if (error & AR9170_RX_ERROR_DECRYPT) {
801 error &= ~AR9170_RX_ERROR_DECRYPT;
802 /*
803 * Rx decryption is done in place,
804 * the original data is lost anyway.
805 */
806
807 return -EINVAL;
808 }
809
810 /* drop any other error frames */
811 if (unlikely(error)) {
812 /* TODO: update netdevice's RX dropped/errors statistics */
813
814 if (ar9170_nag_limiter(ar))
815 printk(KERN_DEBUG "%s: received frame with "
816 "suspicious error code (%#x).\n",
817 wiphy_name(ar->hw->wiphy), error);
818
819 return -EINVAL;
820 }
821
822 status->band = ar->channel->band;
823 status->freq = ar->channel->center_freq;
824
825 switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
826 case AR9170_RX_STATUS_MODULATION_CCK:
827 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
828 status->flag |= RX_FLAG_SHORTPRE;
829 switch (head->plcp[0]) {
830 case 0x0a:
831 status->rate_idx = 0;
832 break;
833 case 0x14:
834 status->rate_idx = 1;
835 break;
836 case 0x37:
837 status->rate_idx = 2;
838 break;
839 case 0x6e:
840 status->rate_idx = 3;
841 break;
842 default:
843 if (ar9170_nag_limiter(ar))
844 printk(KERN_ERR "%s: invalid plcp cck rate "
845 "(%x).\n", wiphy_name(ar->hw->wiphy),
846 head->plcp[0]);
847 return -EINVAL;
848 }
849 break;
850
851 case AR9170_RX_STATUS_MODULATION_OFDM:
852 switch (head->plcp[0] & 0xf) {
853 case 0xb:
854 status->rate_idx = 0;
855 break;
856 case 0xf:
857 status->rate_idx = 1;
858 break;
859 case 0xa:
860 status->rate_idx = 2;
861 break;
862 case 0xe:
863 status->rate_idx = 3;
864 break;
865 case 0x9:
866 status->rate_idx = 4;
867 break;
868 case 0xd:
869 status->rate_idx = 5;
870 break;
871 case 0x8:
872 status->rate_idx = 6;
873 break;
874 case 0xc:
875 status->rate_idx = 7;
876 break;
877 default:
878 if (ar9170_nag_limiter(ar))
879 printk(KERN_ERR "%s: invalid plcp ofdm rate "
880 "(%x).\n", wiphy_name(ar->hw->wiphy),
881 head->plcp[0]);
882 return -EINVAL;
883 }
884 if (status->band == IEEE80211_BAND_2GHZ)
885 status->rate_idx += 4;
886 break;
887
888 case AR9170_RX_STATUS_MODULATION_HT:
889 if (head->plcp[3] & 0x80)
890 status->flag |= RX_FLAG_40MHZ;
891 if (head->plcp[6] & 0x80)
892 status->flag |= RX_FLAG_SHORT_GI;
893
894 status->rate_idx = clamp(0, 75, head->plcp[6] & 0x7f);
895 status->flag |= RX_FLAG_HT;
896 break;
897
898 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
899 /* XXX */
900 if (ar9170_nag_limiter(ar))
901 printk(KERN_ERR "%s: invalid modulation\n",
902 wiphy_name(ar->hw->wiphy));
903 return -EINVAL;
904 }
905
906 return 0;
907 }
908
/* Fill the antenna/signal/noise fields of @status from the phy tail. */
static void ar9170_rx_phy_status(struct ar9170 *ar,
				 struct ar9170_rx_phystatus *phy,
				 struct ieee80211_rx_status *status)
{
	int i;

	BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);

	/* an rssi value of 0x80 apparently marks an unused antenna
	 * chain - only the other chains count (TODO confirm vs. hw docs) */
	for (i = 0; i < 3; i++)
		if (phy->rssi[i] != 0x80)
			status->antenna |= BIT(i);

	/* post-process RSSI */
	for (i = 0; i < 7; i++)
		if (phy->rssi[i] & 0x80)
			phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;

	/* TODO: we could do something with phy_errors */
	status->signal = ar->noise[0] + phy->rssi_combined;
	status->noise = ar->noise[0];
}
930
/*
 * Copy one received MPDU out of the (possibly multi-frame) transfer
 * buffer into a freshly allocated skb, choosing the headroom so the
 * payload behind the variable-length 802.11 header ends up aligned.
 * Returns NULL when the allocation fails.
 */
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
{
	struct sk_buff *skb;
	int reserved = 0;
	struct ieee80211_hdr *hdr = (void *) buf;

	/* QoS control, the A-MSDU case and a 4th address each change the
	 * 802.11 header length - accumulate NET_IP_ALIGN per occurrence */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		reserved += NET_IP_ALIGN;

		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
			reserved += NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		reserved += NET_IP_ALIGN;

	/* 32 bytes of headroom plus NET_IP_ALIGN when the accumulated
	 * offset is odd in NET_IP_ALIGN units - only the parity matters
	 * (NOTE(review): assumes NET_IP_ALIGN == 2 - confirm) */
	reserved = 32 + (reserved & NET_IP_ALIGN);

	skb = dev_alloc_skb(len + reserved);
	if (likely(skb)) {
		skb_reserve(skb, reserved);
		memcpy(skb_put(skb, len), buf, len);
	}

	return skb;
}
958
959 /*
960 * If the frame alignment is right (or the kernel has
961 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
962 * is only a single MPDU in the USB frame, then we could
963 * submit to mac80211 the SKB directly. However, since
964 * there may be multiple packets in one SKB in stream
965 * mode, and we need to observe the proper ordering,
966 * this is non-trivial.
967 */
968
/*
 * Process one MPDU extracted from the rx stream. Each MPDU carries a
 * trailing MAC status word; the FIRST/SINGLE segments additionally
 * start with a PLCP header and the LAST/SINGLE segments end with a
 * phy status tail. Valid frames are copied and handed to mac80211.
 */
static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct ar9170_rx_head *head;
	struct ar9170_rx_macstatus *mac;
	struct ar9170_rx_phystatus *phy = NULL;
	struct ieee80211_rx_status status;
	struct sk_buff *skb;
	int mpdu_len;

	/* need at least the MAC status trailer to make any sense of this */
	if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
		return ;

	/* Received MPDU */
	mpdu_len = len - sizeof(*mac);

	mac = (void *)(buf + mpdu_len);
	if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
		/* this frame is too damaged and can't be used - drop it */

		return ;
	}

	switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
	case AR9170_RX_STATUS_MPDU_FIRST:
		/* first mpdu packet has the plcp header */
		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
			head = (void *) buf;
			/* cache the plcp for the MIDDLE/LAST segments */
			memcpy(&ar->rx_mpdu.plcp, (void *) buf,
			       sizeof(struct ar9170_rx_head));

			mpdu_len -= sizeof(struct ar9170_rx_head);
			buf += sizeof(struct ar9170_rx_head);
			ar->rx_mpdu.has_plcp = true;
		} else {
			if (ar9170_nag_limiter(ar))
				printk(KERN_ERR "%s: plcp info is clipped.\n",
				       wiphy_name(ar->hw->wiphy));
			return ;
		}
		break;

	case AR9170_RX_STATUS_MPDU_LAST:
		/* last mpdu has a extra tail with phy status information */

		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
			phy = (void *)(buf + mpdu_len);
		} else {
			if (ar9170_nag_limiter(ar))
				printk(KERN_ERR "%s: frame tail is clipped.\n",
				       wiphy_name(ar->hw->wiphy));
			return ;
		}

		/* fall through - LAST also needs the cached plcp header */
	case AR9170_RX_STATUS_MPDU_MIDDLE:
		/* middle mpdus are just data */
		if (unlikely(!ar->rx_mpdu.has_plcp)) {
			if (!ar9170_nag_limiter(ar))
				return ;

			printk(KERN_ERR "%s: rx stream did not start "
			       "with a first_mpdu frame tag.\n",
			       wiphy_name(ar->hw->wiphy));

			return ;
		}

		head = &ar->rx_mpdu.plcp;
		break;

	case AR9170_RX_STATUS_MPDU_SINGLE:
		/* single mpdu - has plcp (head) and phy status (tail) */
		head = (void *) buf;

		mpdu_len -= sizeof(struct ar9170_rx_head);
		mpdu_len -= sizeof(struct ar9170_rx_phystatus);

		buf += sizeof(struct ar9170_rx_head);
		phy = (void *)(buf + mpdu_len);
		break;

	default:
		/* unreachable: the mask only yields the four cases above */
		BUG_ON(1);
		break;
	}

	/* a frame shorter than the FCS cannot be real */
	if (unlikely(mpdu_len < FCS_LEN))
		return ;

	memset(&status, 0, sizeof(status));
	if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
		return ;

	if (phy)
		ar9170_rx_phy_status(ar, phy, &status);

	skb = ar9170_rx_copy_data(buf, mpdu_len);
	if (likely(skb)) {
		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
		ieee80211_rx_irqsafe(ar->hw, skb);
	}
}
1071
/*
 * ar9170_rx - unpack and dispatch one received USB transfer.
 *
 * The firmware concatenates frames/responses into a "stream": each
 * element starts with a 16-bit little-endian payload length, followed
 * by the tag bytes {0x00, 0x4e}, followed by the payload padded to a
 * 4-byte boundary.  An element may be split across two transfers; the
 * incomplete tail is parked in ar->rx_failover (with the outstanding
 * byte count in ar->rx_failover_missing) and stitched together when
 * the remainder arrives.
 */
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int i, tlen, resplen, wlen = 0, clen = 0;
	u8 *tbuf, *respbuf;

	tbuf = skb->data;
	tlen = skb->len;

	/* every element needs at least the 4-byte length + tag header */
	while (tlen >= 4) {
		clen = tbuf[1] << 8 | tbuf[0];	/* payload length (LE16) */
		wlen = ALIGN(clen, 4);		/* padded on-wire length */

		/* check if this is stream has a valid tag.*/
		if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
			/*
			 * TODO: handle the highly unlikely event that the
			 * corrupted stream has the TAG at the right position.
			 */

			/* check if the frame can be repaired. */
			if (!ar->rx_failover_missing) {
				/* this is no "short read". */
				if (ar9170_nag_limiter(ar)) {
					printk(KERN_ERR "%s: missing tag!\n",
					       wiphy_name(ar->hw->wiphy));
					goto err_telluser;
				} else
					goto err_silent;
			}

			if (ar->rx_failover_missing > tlen) {
				if (ar9170_nag_limiter(ar)) {
					printk(KERN_ERR "%s: possible multi "
					       "stream corruption!\n",
					       wiphy_name(ar->hw->wiphy));
					goto err_telluser;
				} else
					goto err_silent;
			}

			/* append this chunk to the parked partial element */
			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing -= tlen;

			if (ar->rx_failover_missing <= 0) {
				/*
				 * nested ar9170_rx call!
				 * termination is guranteed, even when the
				 * combined frame also have a element with
				 * a bad tag.
				 */

				ar->rx_failover_missing = 0;
				ar9170_rx(ar, ar->rx_failover);

				skb_reset_tail_pointer(ar->rx_failover);
				skb_trim(ar->rx_failover, 0);
			}

			return ;
		}

		/* check if stream is clipped */
		if (wlen > tlen - 4) {
			if (ar->rx_failover_missing) {
				/* TODO: handle double stream corruption. */
				if (ar9170_nag_limiter(ar)) {
					printk(KERN_ERR "%s: double rx stream "
					       "corruption!\n",
					       wiphy_name(ar->hw->wiphy));
					goto err_telluser;
				} else
					goto err_silent;
			}

			/*
			 * save incomplete data set.
			 * the firmware will resend the missing bits when
			 * the rx - descriptor comes round again.
			 */

			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing = clen - tlen;
			return ;
		}
		resplen = clen;
		respbuf = tbuf + 4;
		/* advance past header + padded payload to the next element */
		tbuf += wlen + 4;
		tlen -= wlen + 4;

		i = 0;

		/* weird thing, but this is the same in the original driver */
		while (resplen > 2 && i < 12 &&
		       respbuf[0] == 0xff && respbuf[1] == 0xff) {
			i += 2;
			resplen -= 2;
			respbuf += 2;
		}

		if (resplen < 4)
			continue;

		/* found the 6 * 0xffff marker? */
		if (i == 12)
			ar9170_handle_command_response(ar, respbuf, resplen);
		else
			ar9170_handle_mpdu(ar, respbuf, clen);
	}

	if (tlen) {
		/* trailing bytes that do not form a complete header */
		if (net_ratelimit())
			printk(KERN_ERR "%s: %d bytes of unprocessed "
			       "data left in rx stream!\n",
			       wiphy_name(ar->hw->wiphy), tlen);

		goto err_telluser;
	}

	return ;

err_telluser:
	printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
	       "data:%d, rx:%d, pending:%d ]\n",
	       wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
	       ar->rx_failover_missing);

	if (ar->rx_failover_missing)
		print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
				     ar->rx_failover->data,
				     ar->rx_failover->len);

	print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
			     skb->data, skb->len);

	printk(KERN_ERR "%s: please check your hardware and cables, if "
	       "you see this message frequently.\n",
	       wiphy_name(ar->hw->wiphy));

err_silent:
	/* drop the parked partial element and resync on the next transfer */
	if (ar->rx_failover_missing) {
		skb_reset_tail_pointer(ar->rx_failover);
		skb_trim(ar->rx_failover, 0);
		ar->rx_failover_missing = 0;
	}
}
1217
/* Initialize one EDCF parameter set: AIFS, CWmin, CWmax and TXOP. */
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
1225
/*
 * mac80211 start callback.
 *
 * Resets the driver-side queue statistics and the QoS/A-MPDU defaults,
 * opens the transport (ar->open), initializes MAC, QoS, PHY and RF and
 * finally starts the receive DMA.  On success the device state is set
 * to AR9170_STARTED.  Returns 0 or a negative error code.
 */
static int ar9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	/* no filter updates pending from a previous run */
	ar->filter_changed = 0;

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < __AR9170_NUM_TXQ; i++)
		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;

	/* reset QoS defaults */
	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT*/
	AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023,  0); /* BACKGROUND */
	AR9170_FILL_QUEUE(ar->edcf[2], 2, 7,    15, 94); /* VIDEO */
	AR9170_FILL_QUEUE(ar->edcf[3], 2, 3,     7, 47); /* VOICE */
	AR9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */

	/* set sane AMPDU defaults */
	ar->global_ampdu_density = 6;
	ar->global_ampdu_factor = 3;

	ar->bad_hw_nagger = jiffies;

	err = ar->open(ar);
	if (err)
		goto out;

	err = ar9170_init_mac(ar);
	if (err)
		goto out;

	err = ar9170_set_qos(ar);
	if (err)
		goto out;

	err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
	if (err)
		goto out;

	err = ar9170_init_rf(ar);
	if (err)
		goto out;

	/* start DMA */
	err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
	if (err)
		goto out;

	ar->state = AR9170_STARTED;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
1284
/*
 * mac80211 stop callback - mirror image of ar9170_op_start().
 *
 * The state is dropped to AR9170_IDLE *before* the workers are
 * cancelled, so a still-running work item sees the device as stopped.
 * The works are cancelled outside ar->mutex because at least
 * ar9170_set_filters() takes that mutex itself.
 */
static void ar9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	unsigned int i;

	if (IS_STARTED(ar))
		ar->state = AR9170_IDLE;

	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_AR9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif
	cancel_work_sync(&ar->filter_config_work);
	cancel_work_sync(&ar->beacon_work);

	mutex_lock(&ar->mutex);

	if (IS_ACCEPTING_CMD(ar)) {
		ar9170_set_leds_state(ar, 0);

		/* stop DMA */
		ar9170_write_reg(ar, 0x1c3d30, 0);
		ar->stop(ar);
	}

	/* flush all driver-side queues; the frames are never reported back */
	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		skb_queue_purge(&ar->tx_pending[i]);
		skb_queue_purge(&ar->tx_status[i]);
	}
	skb_queue_purge(&ar->tx_status_ampdu);

	mutex_unlock(&ar->mutex);
}
1318
1319 static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
1320 {
1321 struct ar9170_tx_control *txc = (void *) skb->data;
1322
1323 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
1324 }
1325
1326 static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
1327 struct sk_buff *src)
1328 {
1329 struct ar9170_tx_control *dst_txc, *src_txc;
1330 struct ieee80211_tx_info *dst_info, *src_info;
1331 struct ar9170_tx_info *dst_arinfo, *src_arinfo;
1332
1333 src_txc = (void *) src->data;
1334 src_info = IEEE80211_SKB_CB(src);
1335 src_arinfo = (void *) src_info->rate_driver_data;
1336
1337 dst_txc = (void *) dst->data;
1338 dst_info = IEEE80211_SKB_CB(dst);
1339 dst_arinfo = (void *) dst_info->rate_driver_data;
1340
1341 dst_txc->phy_control = src_txc->phy_control;
1342
1343 /* same MCS for the whole aggregate */
1344 memcpy(dst_info->driver_rates, src_info->driver_rates,
1345 sizeof(dst_info->driver_rates));
1346 }
1347
/*
 * Push and fill the ar9170_tx_control hardware descriptor for @skb.
 *
 * Sets the on-air length, QoS queue bits, encryption type, protection
 * (RTS/CTS) and the driver-private ack-tracking flags/timeout in the
 * skb's rate_driver_data.  Returns 0 on success; on failure the
 * descriptor is pulled off again and -EINVAL is returned, leaving the
 * skb as it was.
 */
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ar9170_tx_control *txc;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_rate *txrate;
	struct ar9170_tx_info *arinfo;
	unsigned int queue = skb_get_queue_mapping(skb);
	u16 keytype = 0;
	u16 len, icv = 0;

	/* the driver info must fit into mac80211's rate_driver_data area */
	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);
	len = skb->len;

	txc = (void *)skb_push(skb, sizeof(*txc));

	if (info->control.hw_key) {
		icv = info->control.hw_key->icv_len;

		switch (info->control.hw_key->alg) {
		case ALG_WEP:
			keytype = AR9170_TX_MAC_ENCR_RC4;
			break;
		case ALG_TKIP:
			/* TKIP uses the same RC4 engine as WEP */
			keytype = AR9170_TX_MAC_ENCR_RC4;
			break;
		case ALG_CCMP:
			keytype = AR9170_TX_MAC_ENCR_AES;
			break;
		default:
			WARN_ON(1);
			goto err_out;
		}
	}

	/* Length */
	/* + 4: presumably the FCS the hardware appends - TODO confirm */
	txc->length = cpu_to_le16(len + icv + 4);

	txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
				       AR9170_TX_MAC_BACKOFF);
	txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
					AR9170_TX_MAC_QOS_SHIFT);
	txc->mac_control |= cpu_to_le16(keytype);
	txc->phy_control = cpu_to_le32(0);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	txrate = &info->control.rates[0];
	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);

	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);

	/* acked unicast frames need status tracking; everything else not */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			if (unlikely(!info->control.sta))
				goto err_out;

			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
			arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;

			goto out;
		}

		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
		/*
		 * WARNING:
		 * Putting the QoS queue bits into an unexplored territory is
		 * certainly not elegant.
		 *
		 * In my defense: This idea provides a reasonable way to
		 * smuggle valuable information to the tx_status callback.
		 * Also, the idea behind this bit-abuse came straight from
		 * the original driver code.
		 */

		txc->phy_control |=
			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
		arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
	} else {
		arinfo->flags = AR9170_TX_FLAG_NO_ACK;
	}

out:
	return 0;

err_out:
	/* undo the skb_push() above so the caller gets the skb back intact */
	skb_pull(skb, sizeof(*txc));
	return -EINVAL;
}
1446
/*
 * Fill the PHY portion (modulation, rate/MCS, bandwidth, GI, tx power
 * and chainmask) of the tx descriptor from the rate-control selection.
 * Expects the ar9170_tx_control descriptor already at skb->data, i.e.
 * ar9170_tx_prepare() must have run on this skb.
 */
static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_tx_rate *txrate;
	u32 power, chains;

	txc = (void *) skb->data;
	info = IEEE80211_SKB_CB(skb);
	txrate = &info->control.rates[0];

	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);

	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		/* HT (MCS) transmission */
		u32 r = txrate->idx;
		u8 *txpower;

		/* heavy clip control */
		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);

		r <<= AR9170_TX_PHY_MCS_SHIFT;
		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);

		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/* pick the power table matching band and channel width */
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht40;
			else
				txpower = ar->power_2G_ht40;
		} else {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht20;
			else
				txpower = ar->power_2G_ht20;
		}

		power = txpower[(txrate->idx) & 7];
	} else {
		/* legacy CCK/OFDM transmission */
		u8 *txpower;
		u32 mod;
		u32 phyrate;
		u8 idx = txrate->idx;

		if (info->band != IEEE80211_BAND_2GHZ) {
			/* 5 GHz rate table skips the four CCK entries */
			idx += 4;
			txpower = ar->power_5G_leg;
			mod = AR9170_TX_PHY_MOD_OFDM;
		} else {
			if (idx < 4) {
				txpower = ar->power_2G_cck;
				mod = AR9170_TX_PHY_MOD_CCK;
			} else {
				mod = AR9170_TX_PHY_MOD_OFDM;
				txpower = ar->power_2G_ofdm;
			}
		}

		rate = &__ar9170_ratetable[idx];

		/* hw_value: low nibble = phy rate, bits 4-5 = power index */
		phyrate = rate->hw_value & 0xF;
		power = txpower[(rate->hw_value & 0x30) >> 4];
		phyrate <<= AR9170_TX_PHY_MCS_SHIFT;

		txc->phy_control |= cpu_to_le32(mod);
		txc->phy_control |= cpu_to_le32(phyrate);
	}

	power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
	power &= AR9170_TX_PHY_TX_PWR_MASK;
	txc->phy_control |= cpu_to_le32(power);

	/* set TX chains */
	if (ar->eeprom.tx_mask == 1) {
		chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		chains = AR9170_TX_PHY_TXCHAIN_2;

		/* >= 36M legacy OFDM - use only one chain */
		if (rate && rate->bitrate >= 360)
			chains = AR9170_TX_PHY_TXCHAIN_1;
	}
	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
1546
/*
 * Assemble A-MPDUs from the per-TID queues on ar->tx_ampdu_list and
 * splice them onto the regular tx_pending queues.
 *
 * Returns true when at least one aggregate was scheduled, i.e. the
 * caller should kick ar9170_tx().
 *
 * Locking: tx_ampdu_list_lock is held while walking the list; the
 * per-TID queue lock nests inside it.  The list lock is dropped and
 * re-acquired around the splice into tx_pending (see below), which is
 * why list_for_each_entry_safe() is used.
 */
static bool ar9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct ar9170_sta_tid *tid_info = NULL, *tmp;
	struct sk_buff *skb, *first = NULL;
	unsigned long flags, f2;
	unsigned int i = 0;
	u16 seq, queue, tmpssn;
	bool run = false;

	skb_queue_head_init(&agg);

	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
	if (list_empty(&ar->tx_ampdu_list)) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: aggregation list is empty.\n",
		       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
		goto out_unlock;
	}

	list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
		if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		/* hard cap on the number of TIDs serviced per invocation */
		if (++i > 64) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: enough frames aggregated.\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			break;
		}

		queue = TID_TO_WME_AC(tid_info->tid);

		if (skb_queue_len(&ar->tx_pending[queue]) >=
		    AR9170_NUM_TX_AGG_MAX) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: queue %d full.\n",
			       wiphy_name(ar->hw->wiphy), queue);
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		list_del_init(&tid_info->list);

		spin_lock_irqsave(&tid_info->queue.lock, f2);
		tmpssn = seq = tid_info->ssn;
		first = skb_peek(&tid_info->queue);

		if (likely(first))
			tmpssn = ar9170_get_seq(first);

		/* resync ssn with the head of the queue if they diverged */
		if (unlikely(tmpssn != seq)) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
			       wiphy_name(ar->hw->wiphy), seq, tmpssn);
#endif /* AR9170_TXAGG_DEBUG */
			tid_info->ssn = tmpssn;
		}

#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
		       "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
		       tid_info->tid, tid_info->ssn,
		       skb_queue_len(&tid_info->queue));
		__ar9170_dump_txqueue(ar, &tid_info->queue);
#endif /* AR9170_TXAGG_DEBUG */

		/* pull consecutive-sequence frames into the aggregate */
		while ((skb = skb_peek(&tid_info->queue))) {
			if (unlikely(ar9170_get_seq(skb) != seq))
				break;

			__skb_unlink(skb, &tid_info->queue);
			tid_info->ssn = seq = GET_NEXT_SEQ(seq);

			if (unlikely(skb_get_queue_mapping(skb) != queue)) {
#ifdef AR9170_TXAGG_DEBUG
				printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
				       "!match.\n", wiphy_name(ar->hw->wiphy),
				       tid_info->tid,
				       TID_TO_WME_AC(tid_info->tid),
				       skb_get_queue_mapping(skb));
#endif /* AR9170_TXAGG_DEBUG */
				dev_kfree_skb_any(skb);
				continue;
			}

			/* first subframe gets full PHY setup, the rest copy it */
			if (unlikely(first == skb)) {
				ar9170_tx_prepare_phy(ar, skb);
				__skb_queue_tail(&agg, skb);
				first = skb;
			} else {
				ar9170_tx_copy_phy(ar, skb, first);
				__skb_queue_tail(&agg, skb);
			}

			if (unlikely(skb_queue_len(&agg) ==
			    AR9170_NUM_TX_AGG_MAX))
				break;
		}

		if (skb_queue_empty(&tid_info->queue))
			tid_info->active = false;
		else
			list_add_tail(&tid_info->list,
				      &ar->tx_ampdu_list);

		spin_unlock_irqrestore(&tid_info->queue.lock, f2);

		if (unlikely(skb_queue_empty(&agg))) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: queued empty list!\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		/*
		 * tell the FW/HW that this is the last frame,
		 * that way it will wait for the immediate block ack.
		 */
		if (likely(skb_peek_tail(&agg)))
			ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));

#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
		       wiphy_name(ar->hw->wiphy));
		__ar9170_dump_txqueue(ar, &agg);
#endif /* AR9170_TXAGG_DEBUG */

		/* drop the list lock while splicing into tx_pending */
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);

		spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
		run = true;

		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	__skb_queue_purge(&agg);

	return run;
}
1699
/*
 * Push frames queued on ar->tx_pending[] out to the device.
 *
 * Enforces a per-queue limit (tx_stats[i].limit) and a global budget
 * of AR9170_TX_MAX_PENDING frames per invocation.  Queues that hit
 * their limit are stopped in mac80211; if uploads fail, the stats are
 * rolled back and the queue is woken again.  Schedules the tx janitor
 * work whenever at least one frame was handed to the device.
 */
static void ar9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	struct ar9170_tx_info *arinfo;
	unsigned int i, frames, frames_failed, remaining_space;
	int err;
	bool schedule_garbagecollector = false;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	if (unlikely(!IS_STARTED(ar)))
		return ;

	remaining_space = AR9170_TX_MAX_PENDING;

	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		spin_lock_irqsave(&ar->tx_stats_lock, flags);
		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: queue %d full\n",
			       wiphy_name(ar->hw->wiphy), i);

			printk(KERN_DEBUG "%s: stuck frames: ===> \n",
			       wiphy_name(ar->hw->wiphy));
			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */

#ifdef AR9170_QUEUE_STOP_DEBUG
			printk(KERN_DEBUG "%s: stop queue %d\n",
			       wiphy_name(ar->hw->wiphy), i);
			__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
			ieee80211_stop_queue(ar->hw, i);
			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
			continue;
		}

		/* how many frames this queue may send right now */
		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
			     skb_queue_len(&ar->tx_pending[i]));

		if (remaining_space < frames) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
			       "remaining slots:%d, needed:%d\n",
			       wiphy_name(ar->hw->wiphy), i, remaining_space,
			       frames);
#endif /* AR9170_QUEUE_DEBUG */
			frames = remaining_space;
		}

		/* account optimistically; failures are rolled back below */
		ar->tx_stats[i].len += frames;
		ar->tx_stats[i].count += frames;
		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

		if (!frames)
			continue;

		frames_failed = 0;
		while (frames) {
			skb = skb_dequeue(&ar->tx_pending[i]);
			if (unlikely(!skb)) {
				frames_failed += frames;
				frames = 0;
				break;
			}

			info = IEEE80211_SKB_CB(skb);
			arinfo = (void *) info->rate_driver_data;

			/* TODO: cancel stuck frames */
			arinfo->timeout = jiffies +
					  msecs_to_jiffies(AR9170_TX_TIMEOUT);

			if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
				ar->tx_ampdu_pending++;

#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
			       wiphy_name(ar->hw->wiphy), i);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */

			err = ar->tx(ar, skb);
			if (unlikely(err)) {
				if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
					ar->tx_ampdu_pending--;

				frames_failed++;
				dev_kfree_skb_any(skb);
			} else {
				remaining_space--;
				schedule_garbagecollector = true;
			}

			frames--;
		}

#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
		       wiphy_name(ar->hw->wiphy), i);

		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
		       wiphy_name(ar->hw->wiphy));
		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
#endif /* AR9170_QUEUE_DEBUG */

		if (unlikely(frames_failed)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: frames failed %d =>\n",
			       wiphy_name(ar->hw->wiphy), frames_failed);
#endif /* AR9170_QUEUE_DEBUG */

			/* undo the optimistic accounting for failed frames */
			spin_lock_irqsave(&ar->tx_stats_lock, flags);
			ar->tx_stats[i].len -= frames_failed;
			ar->tx_stats[i].count -= frames_failed;
#ifdef AR9170_QUEUE_STOP_DEBUG
			printk(KERN_DEBUG "%s: wake queue %d\n",
			       wiphy_name(ar->hw->wiphy), i);
			__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
			ieee80211_wake_queue(ar->hw, i);
			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
		}
	}

	if (schedule_garbagecollector)
		queue_delayed_work(ar->hw->workqueue,
				   &ar->tx_janitor,
				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
1833
/*
 * Insert an A-MPDU-eligible frame into its TID queue, keeping the
 * queue ordered by sequence number.
 *
 * Returns true when AR9170_NUM_TX_AGG_MAX frames have accumulated and
 * the caller should aggregate now (ar9170_tx_ampdu()).  Frames for a
 * session that is not fully established, or whose sequence number
 * falls outside the BlockAck window, are dropped here.
 */
static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo;
	struct ar9170_sta_info *sta_info;
	struct ar9170_sta_tid *agg;
	struct sk_buff *iter;
	unsigned long flags, f2;
	unsigned int max;
	u16 tid, seq, qseq;
	bool run = false, queue = false;

	tid = ar9170_get_tid(skb);
	seq = ar9170_get_seq(skb);
	txinfo = IEEE80211_SKB_CB(skb);
	sta_info = (void *) txinfo->control.sta->drv_priv;
	agg = &sta_info->agg[tid];
	max = sta_info->ampdu_max_len;

	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);

	if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
		       "for ESS:%pM tid:%d state:%d.\n",
		       wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
		       agg->state);
#endif /* AR9170_TXAGG_DEBUG */
		goto err_unlock;
	}

	/* first frame of a fresh aggregate: anchor the BA window at seq */
	if (!agg->active) {
		agg->active = true;
		agg->ssn = seq;
		queue = true;
	}

	/* check if seq is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
		       "fit into BA window (%d - %d)\n",
		       wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
		       (agg->ssn + max) & 0xfff);
#endif /* AR9170_TXAGG_DEBUG */
		goto err_unlock;
	}

	spin_lock_irqsave(&agg->queue.lock, f2);

	/* walk backwards to find the insertion point that keeps seq order */
	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = ar9170_get_seq(iter);

		if (GET_NEXT_SEQ(qseq) == seq) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	/* no predecessor found - this frame belongs at the head */
	__skb_queue_head(&agg->queue, skb);

queued:
	spin_unlock_irqrestore(&agg->queue.lock, f2);

#ifdef AR9170_TXAGG_DEBUG
	printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
	       wiphy_name(ar->hw->wiphy), skb);
	__ar9170_dump_txqueue(ar, &agg->queue);
#endif /* AR9170_TXAGG_DEBUG */

	if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
		run = true;

	if (queue)
		list_add_tail(&agg->list, &ar->tx_ampdu_list);

	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	return run;

err_unlock:
	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	dev_kfree_skb_irq(skb);
	return false;
}
1917
1918 int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1919 {
1920 struct ar9170 *ar = hw->priv;
1921 struct ieee80211_tx_info *info;
1922
1923 if (unlikely(!IS_STARTED(ar)))
1924 goto err_free;
1925
1926 if (unlikely(ar9170_tx_prepare(ar, skb)))
1927 goto err_free;
1928
1929 info = IEEE80211_SKB_CB(skb);
1930 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1931 bool run = ar9170_tx_ampdu_queue(ar, skb);
1932
1933 if (run || !ar->tx_ampdu_pending)
1934 ar9170_tx_ampdu(ar);
1935 } else {
1936 unsigned int queue = skb_get_queue_mapping(skb);
1937
1938 ar9170_tx_prepare_phy(ar, skb);
1939 skb_queue_tail(&ar->tx_pending[queue], skb);
1940 }
1941
1942 ar9170_tx(ar);
1943 return NETDEV_TX_OK;
1944
1945 err_free:
1946 dev_kfree_skb_any(skb);
1947 return NETDEV_TX_OK;
1948 }
1949
/*
 * mac80211 add_interface callback.  The hardware is driven with a
 * single vif; a second add attempt is rejected with -EBUSY.  Hardware
 * crypto offload is only used for unencumbered station mode (and can
 * be disabled with the nohwcrypt module parameter).
 */
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_if_init_conf *conf)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (ar->vif) {
		/* only one virtual interface is supported */
		err = -EBUSY;
		goto unlock;
	}

	ar->vif = conf->vif;
	memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);

	if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	/* install the default frame filter */
	ar->cur_filter = 0;
	ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
	err = ar9170_update_frame_filter(ar);
	if (err)
		goto unlock;

	err = ar9170_set_operating_mode(ar);

unlock:
	mutex_unlock(&ar->mutex);
	return err;
}
1983
/*
 * mac80211 remove_interface callback: undo ar9170_op_add_interface().
 * Clears the vif/filter state, stops the beacon timers, frees the
 * cached beacon and restores the default (non-sniffer, hw-decrypt)
 * operating mode.
 */
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_if_init_conf *conf)
{
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	ar->vif = NULL;
	ar->want_filter = 0;
	ar9170_update_frame_filter(ar);
	ar9170_set_beacon_timers(ar);
	dev_kfree_skb(ar->beacon);
	ar->beacon = NULL;
	ar->sniffer_enabled = false;
	ar->rx_software_decryption = false;
	ar9170_set_operating_mode(ar);
	mutex_unlock(&ar->mutex);
}
2001
2002 static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
2003 {
2004 struct ar9170 *ar = hw->priv;
2005 int err = 0;
2006
2007 mutex_lock(&ar->mutex);
2008
2009 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
2010 /* TODO */
2011 err = 0;
2012 }
2013
2014 if (changed & IEEE80211_CONF_CHANGE_PS) {
2015 /* TODO */
2016 err = 0;
2017 }
2018
2019 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2020 /* TODO */
2021 err = 0;
2022 }
2023
2024 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
2025 /*
2026 * is it long_frame_max_tx_count or short_frame_max_tx_count?
2027 */
2028
2029 err = ar9170_set_hwretry_limit(ar,
2030 ar->hw->conf.long_frame_max_tx_count);
2031 if (err)
2032 goto out;
2033 }
2034
2035 if (changed & BSS_CHANGED_BEACON_INT) {
2036 err = ar9170_set_beacon_timers(ar);
2037 if (err)
2038 goto out;
2039 }
2040
2041 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2042
2043 /* adjust slot time for 5 GHz */
2044 err = ar9170_set_slot_time(ar);
2045 if (err)
2046 goto out;
2047
2048 err = ar9170_set_dyn_sifs_ack(ar);
2049 if (err)
2050 goto out;
2051
2052 err = ar9170_set_channel(ar, hw->conf.channel,
2053 AR9170_RFI_NONE,
2054 nl80211_to_ar9170(hw->conf.channel_type));
2055 if (err)
2056 goto out;
2057 }
2058
2059 out:
2060 mutex_unlock(&ar->mutex);
2061 return err;
2062 }
2063
/*
 * Deferred worker (filter_config_work): flush the filter changes
 * recorded in ar->filter_changed out to the device.  Each pending
 * change bit is consumed with test_and_clear_bit(); on the first
 * failing firmware command the remaining updates are abandoned.
 */
static void ar9170_set_filters(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 filter_config_work);
	int err;

	/* the device may have been stopped since this work was queued */
	if (unlikely(!IS_STARTED(ar)))
		return ;

	mutex_lock(&ar->mutex);
	if (test_and_clear_bit(AR9170_FILTER_CHANGED_MODE,
			       &ar->filter_changed)) {
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto unlock;
	}

	if (test_and_clear_bit(AR9170_FILTER_CHANGED_MULTICAST,
			       &ar->filter_changed)) {
		err = ar9170_update_multicast(ar);
		if (err)
			goto unlock;
	}

	if (test_and_clear_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
			       &ar->filter_changed)) {
		err = ar9170_update_frame_filter(ar);
		if (err)
			goto unlock;
	}

unlock:
	mutex_unlock(&ar->mutex);
}
2098
/*
 * mac80211 configure_filter callback.
 *
 * Runs in atomic context, so the actual device programming cannot be
 * done here: the wanted state is recorded in ar->want_* / the
 * ar->filter_changed bitmap and pushed out later by the
 * filter_config_work worker (ar9170_set_filters()).
 */
static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed_flags,
				       unsigned int *new_flags,
				       int mc_count, struct dev_mc_list *mclist)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
		      FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (changed_flags & FIF_ALLMULTI) {
		if (*new_flags & FIF_ALLMULTI) {
			ar->want_mc_hash = ~0ULL;
		} else {
			u64 mchash;
			int i;

			/* always get broadcast frames */
			mchash = 1ULL << (0xff >> 2);

			/* hash bit index: top six bits of the last addr byte */
			for (i = 0; i < mc_count; i++) {
				if (WARN_ON(!mclist))
					break;
				mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
				mclist = mclist->next;
			}
			ar->want_mc_hash = mchash;
		}
		set_bit(AR9170_FILTER_CHANGED_MULTICAST, &ar->filter_changed);
	}

	if (changed_flags & FIF_CONTROL) {
		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
			     AR9170_MAC_REG_FTF_RTS |
			     AR9170_MAC_REG_FTF_CTS |
			     AR9170_MAC_REG_FTF_ACK |
			     AR9170_MAC_REG_FTF_CFE |
			     AR9170_MAC_REG_FTF_CFE_ACK;

		if (*new_flags & FIF_CONTROL)
			ar->want_filter = ar->cur_filter | filter;
		else
			ar->want_filter = ar->cur_filter & ~filter;

		set_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
			&ar->filter_changed);
	}

	if (changed_flags & FIF_PROMISC_IN_BSS) {
		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
		set_bit(AR9170_FILTER_CHANGED_MODE,
			&ar->filter_changed);
	}

	if (likely(IS_STARTED(ar)))
		queue_work(ar->hw->workqueue, &ar->filter_config_work);
}
2162
/*
 * mac80211 bss_info_changed callback: push BSS parameter changes
 * (BSSID, beacon, association state, slot time, basic rates) to the
 * device.  Errors abort the remaining updates but are not reported
 * back - the callback returns void.
 */
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) {
		err = ar9170_update_beacon(ar);
		if (err)
			goto out;

		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
#ifndef CONFIG_AR9170_LEDS
		/* enable assoc LED. */
		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
	}

	if (changed & BSS_CHANGED_BEACON_INT) {
		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = ar9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = ar9170_set_basic_rates(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
}
2223
2224 static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
2225 {
2226 struct ar9170 *ar = hw->priv;
2227 int err;
2228 u32 tsf_low;
2229 u32 tsf_high;
2230 u64 tsf;
2231
2232 mutex_lock(&ar->mutex);
2233 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
2234 if (!err)
2235 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
2236 mutex_unlock(&ar->mutex);
2237
2238 if (WARN_ON(err))
2239 return 0;
2240
2241 tsf = tsf_high;
2242 tsf = (tsf << 32) | tsf_low;
2243 return tsf;
2244 }
2245
/*
 * mac80211 set_key callback: program (SET_KEY) or remove a hardware
 * crypto key.
 *
 * Slots 0-63 hold pairwise keys and are tracked in the ar->usedkeys
 * bitmap; slots from 64 on hold group keys, indexed by keyidx.  When
 * all pairwise slots are taken the driver falls back to software
 * decryption and returns -ENOSPC.  TKIP needs a second upload for the
 * MIC key.  Finally the roll-call registers are refreshed from the
 * usedkeys bitmap.
 */
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if ((!ar->vif) || (ar->disable_offload))
		return -EOPNOTSUPP;

	/* map mac80211's algorithm to the hardware cipher type */
	switch (key->alg) {
	case ALG_WEP:
		if (key->keylen == WLAN_KEY_LEN_WEP40)
			ktype = AR9170_ENC_ALG_WEP64;
		else
			ktype = AR9170_ENC_ALG_WEP128;
		break;
	case ALG_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case ALG_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (unlikely(!IS_STARTED(ar))) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/* group keys need all-zeroes address */
		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
			sta = NULL;

		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* find a free pairwise key slot */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64) {
				/* no slot left: decrypt in software */
				ar->rx_software_decryption = true;
				ar9170_set_operating_mode(ar);
				err = -ENOSPC;
				goto out;
			}
		} else {
			i = 64 + key->keyidx;
		}

		key->hw_key_idx = i;

		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
					key->key, min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->alg == ALG_TKIP) {
			/* second upload: the TKIP MIC key */
			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
						ktype, 1, key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (unlikely(!IS_STARTED(ar))) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		err = ar9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group key slot: overwrite with a null key */
			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
						AR9170_ENC_ALG_NONE, 0,
						NULL, 0);
			if (err)
				goto out;

			if (key->alg == ALG_TKIP) {
				err = ar9170_upload_key(ar, key->hw_key_idx,
							NULL,
							AR9170_ENC_ALG_NONE, 1,
							NULL, 0);
				if (err)
					goto out;
			}

		}
	}

	/* refresh the key roll-call registers from the usedkeys bitmap */
	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

out:
	mutex_unlock(&ar->mutex);

	return err;
}
2366
/*
 * mac80211 sta_notify callback: initialize or tear down the per-station
 * TX-aggregation bookkeeping when an HT peer appears or disappears.
 */
static void ar9170_sta_notify(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum sta_notify_cmd cmd,
			      struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	switch (cmd) {
	case STA_NOTIFY_ADD:
		memset(sta_info, 0, sizeof(*sta_info));

		/* aggregation state is only kept for HT stations */
		if (!sta->ht_cap.ht_supported)
			break;

		/*
		 * keep the most restrictive values over all peers:
		 * the maximum of the advertised densities ...
		 */
		if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
			ar->global_ampdu_density = sta->ht_cap.ampdu_density;

		/* ... and the minimum of the A-MPDU length factors */
		if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
			ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;

		/* reset every TID to a clean, shut-down aggregation state */
		for (i = 0; i < AR9170_NUM_TID; i++) {
			sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
			sta_info->agg[i].active = false;
			sta_info->agg[i].ssn = 0;
			sta_info->agg[i].retry = 0;
			sta_info->agg[i].tid = i;
			INIT_LIST_HEAD(&sta_info->agg[i].list);
			skb_queue_head_init(&sta_info->agg[i].queue);
		}

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		break;

	case STA_NOTIFY_REMOVE:
		if (!sta->ht_cap.ht_supported)
			break;

		/* invalidate all TIDs and drop any queued frames */
		for (i = 0; i < AR9170_NUM_TID; i++) {
			sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
			skb_queue_purge(&sta_info->agg[i].queue);
		}

		break;

	default:
		break;
	}

	/* kick a deferred RX-filter reconfiguration if one is pending */
	if (IS_STARTED(ar) && ar->filter_changed)
		queue_work(ar->hw->workqueue, &ar->filter_config_work);
}
2420
2421 static int ar9170_get_stats(struct ieee80211_hw *hw,
2422 struct ieee80211_low_level_stats *stats)
2423 {
2424 struct ar9170 *ar = hw->priv;
2425 u32 val;
2426 int err;
2427
2428 mutex_lock(&ar->mutex);
2429 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
2430 ar->stats.dot11ACKFailureCount += val;
2431
2432 memcpy(stats, &ar->stats, sizeof(*stats));
2433 mutex_unlock(&ar->mutex);
2434
2435 return 0;
2436 }
2437
2438 static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
2439 struct ieee80211_tx_queue_stats *tx_stats)
2440 {
2441 struct ar9170 *ar = hw->priv;
2442
2443 spin_lock_bh(&ar->tx_stats_lock);
2444 memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
2445 spin_unlock_bh(&ar->tx_stats_lock);
2446
2447 return 0;
2448 }
2449
2450 static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
2451 const struct ieee80211_tx_queue_params *param)
2452 {
2453 struct ar9170 *ar = hw->priv;
2454 int ret;
2455
2456 mutex_lock(&ar->mutex);
2457 if ((param) && !(queue > __AR9170_NUM_TXQ)) {
2458 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
2459 param, sizeof(*param));
2460
2461 ret = ar9170_set_qos(ar);
2462 } else
2463 ret = -EINVAL;
2464
2465 mutex_unlock(&ar->mutex);
2466 return ret;
2467 }
2468
/*
 * mac80211 ampdu_action callback: drive the driver-side TX A-MPDU
 * session state machine.  RX aggregation is handled entirely by the
 * firmware.  All TID state transitions happen under tx_ampdu_list_lock.
 */
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
			       enum ieee80211_ampdu_mlme_action action,
			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
	unsigned long flags;

	/* aggregation support is opt-in via the 'ht' module parameter */
	if (!modparam_ht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		/* a session may only start from a fully shut-down TID */
		if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
		    !list_empty(&tid_info->list)) {
			spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
			       "is in a very bad state!\n",
			       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
			return -EBUSY;
		}

		/* report our starting sequence number back to mac80211 */
		*ssn = tid_info->ssn;
		tid_info->state = AR9170_TID_STATE_PROGRESS;
		tid_info->active = false;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		/* tear down: unlink the TID and drop any pending frames */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		tid_info->state = AR9170_TID_STATE_SHUTDOWN;
		list_del_init(&tid_info->list);
		tid_info->active = false;
		skb_queue_purge(&tid_info->queue);
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
		       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
		/* the BA session is up; aggregation may begin on this TID */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by firmware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
2533
/* mac80211 callback table wiring the stack into this driver */
static const struct ieee80211_ops ar9170_ops = {
	.start			= ar9170_op_start,
	.stop			= ar9170_op_stop,
	.tx			= ar9170_op_tx,
	.add_interface		= ar9170_op_add_interface,
	.remove_interface	= ar9170_op_remove_interface,
	.config			= ar9170_op_config,
	.configure_filter	= ar9170_op_configure_filter,
	.conf_tx		= ar9170_conf_tx,
	.bss_info_changed	= ar9170_op_bss_info_changed,
	.get_tsf		= ar9170_op_get_tsf,
	.set_key		= ar9170_set_key,
	.sta_notify		= ar9170_sta_notify,
	.get_stats		= ar9170_get_stats,
	.get_tx_stats		= ar9170_get_tx_stats,
	.ampdu_action		= ar9170_ampdu_action,
};
2551
/*
 * Allocate and pre-initialize the shared ar9170 device state.
 *
 * 'priv_size' is the transport layer's (e.g. USB glue) private-data
 * size, passed through to ieee80211_alloc_hw().  Returns the ar9170
 * context on success or ERR_PTR(-ENOMEM) on allocation failure.
 */
void *ar9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	/* locks and queues must exist before any callback can fire */
	mutex_init(&ar->mutex);
	spin_lock_init(&ar->cmdlock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	skb_queue_head_init(&ar->tx_status_ampdu);
	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	ar9170_rx_reset_rx_mpdu(ar);
	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);

	/* all hw supports 2.4 GHz, so set channel to 1 by default */
	ar->channel = &ar9170_2ghz_chantable[0];

	/* first part of wiphy init */
	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_WDS) |
					 BIT(NL80211_IFTYPE_ADHOC);
	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			 IEEE80211_HW_SIGNAL_DBM |
			 IEEE80211_HW_NOISE_DBM;

	if (modparam_ht) {
		ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	} else {
		/* no HT module parameter: advertise legacy-only bands */
		ar9170_band_2GHz.ht_cap.ht_supported = false;
		ar9170_band_5GHz.ht_cap.ht_supported = false;
	}

	ar->hw->queues = __AR9170_NUM_TXQ;
	ar->hw->extra_tx_headroom = 8;
	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);

	ar->hw->max_rates = 1;
	ar->hw->max_rate_tries = 3;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;

err_nomem:
	/* kfree_skb(NULL) is a no-op, so this covers both failure paths */
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
2627
2628 static int ar9170_read_eeprom(struct ar9170 *ar)
2629 {
2630 #define RW 8 /* number of words to read at once */
2631 #define RB (sizeof(u32) * RW)
2632 u8 *eeprom = (void *)&ar->eeprom;
2633 u8 *addr = ar->eeprom.mac_address;
2634 __le32 offsets[RW];
2635 unsigned int rx_streams, tx_streams, tx_params = 0;
2636 int i, j, err, bands = 0;
2637
2638 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
2639
2640 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
2641 #ifndef __CHECKER__
2642 /* don't want to handle trailing remains */
2643 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
2644 #endif
2645
2646 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
2647 for (j = 0; j < RW; j++)
2648 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
2649 RB * i + 4 * j);
2650
2651 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
2652 RB, (u8 *) &offsets,
2653 RB, eeprom + RB * i);
2654 if (err)
2655 return err;
2656 }
2657
2658 #undef RW
2659 #undef RB
2660
2661 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
2662 return -ENODATA;
2663
2664 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
2665 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
2666 bands++;
2667 }
2668 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
2669 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
2670 bands++;
2671 }
2672
2673 rx_streams = hweight8(ar->eeprom.rx_mask);
2674 tx_streams = hweight8(ar->eeprom.tx_mask);
2675
2676 if (rx_streams != tx_streams)
2677 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
2678
2679 if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
2680 tx_params = (tx_streams - 1) <<
2681 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2682
2683 ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
2684 ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
2685
2686 /*
2687 * I measured this, a bandswitch takes roughly
2688 * 135 ms and a frequency switch about 80.
2689 *
2690 * FIXME: measure these values again once EEPROM settings
2691 * are used, that will influence them!
2692 */
2693 if (bands == 2)
2694 ar->hw->channel_change_time = 135 * 1000;
2695 else
2696 ar->hw->channel_change_time = 80 * 1000;
2697
2698 ar->regulatory.current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
2699 ar->regulatory.current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
2700
2701 /* second part of wiphy init */
2702 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
2703
2704 return bands ? 0 : -EINVAL;
2705 }
2706
2707 static int ar9170_reg_notifier(struct wiphy *wiphy,
2708 struct regulatory_request *request)
2709 {
2710 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2711 struct ar9170 *ar = hw->priv;
2712
2713 return ath_reg_notifier_apply(wiphy, request, &ar->regulatory);
2714 }
2715
/*
 * Final device bring-up: read the EEPROM, initialize regulatory
 * support, register with mac80211 and set up the LEDs.
 *
 * On any failure after ieee80211_register_hw() the hw is unregistered
 * again; earlier failures just propagate the error code.
 */
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
	int err;

	/* try to read EEPROM, init MAC addr */
	err = ar9170_read_eeprom(ar);
	if (err)
		goto err_out;

	err = ath_regd_init(&ar->regulatory, ar->hw->wiphy,
			    ar9170_reg_notifier);
	if (err)
		goto err_out;

	err = ieee80211_register_hw(ar->hw);
	if (err)
		goto err_out;

	/* a non-world EEPROM regdomain takes priority: hint it to cfg80211 */
	if (!ath_is_world_regd(&ar->regulatory))
		regulatory_hint(ar->hw->wiphy, ar->regulatory.alpha2);

	err = ar9170_init_leds(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_AR9170_LEDS
	err = ar9170_register_leds(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_AR9170_LEDS */

	dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return err;

err_unreg:
	ieee80211_unregister_hw(ar->hw);

err_out:
	return err;
}
2758
/*
 * Undo ar9170_register()/ar9170_alloc(): drop the LEDs, free the RX
 * stream-reconstruction buffer and detach from mac80211.
 */
void ar9170_unregister(struct ar9170 *ar)
{
#ifdef CONFIG_AR9170_LEDS
	ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */

	kfree_skb(ar->rx_failover);
	ieee80211_unregister_hw(ar->hw);
	mutex_destroy(&ar->mutex);
}
This page took 0.086855 seconds and 6 git commands to generate.