drivers/net/wireless/ath/ath9k/xmit.c
1 /*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "ath9k.h"
18 #include "ar9003_mac.h"
19
20 #define BITS_PER_BYTE 8
21 #define OFDM_PLCP_BITS 22
22 #define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
23 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24 #define L_STF 8
25 #define L_LTF 8
26 #define L_SIG 4
27 #define HT_SIG 8
28 #define HT_STF 4
29 #define HT_LTF(_ns) (4 * (_ns))
30 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
31 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
32 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
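/*
 * Illustrative arithmetic for the macros above (added note, not part of the
 * original driver): with full GI an OFDM symbol lasts 4 us, so
 * SYMBOL_TIME(47) = 47 << 2 = 188 us and NUM_SYMBOLS_PER_USEC(16) = 4.
 * With short (half) GI a symbol lasts 3.6 us, approximated in integer math:
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us and
 * NUM_SYMBOLS_PER_USEC_HALFGI(16) = (16 * 5 - 4) / 18 = 4.
 */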
34
35 #define OFDM_SIFS_TIME 16
36
37 static u16 bits_per_symbol[][2] = {
38 /* 20MHz 40MHz */
39 { 26, 54 }, /* 0: BPSK */
40 { 52, 108 }, /* 1: QPSK 1/2 */
41 { 78, 162 }, /* 2: QPSK 3/4 */
42 { 104, 216 }, /* 3: 16-QAM 1/2 */
43 { 156, 324 }, /* 4: 16-QAM 3/4 */
44 { 208, 432 }, /* 5: 64-QAM 2/3 */
45 { 234, 486 }, /* 6: 64-QAM 3/4 */
46 { 260, 540 }, /* 7: 64-QAM 5/6 */
47 };
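/*
 * Added note: the table above lists data bits per OFDM symbol for a single
 * spatial stream. For multi-stream MCS rates the code indexes it with
 * (rix % 8) and multiplies by HT_RC_2_STREAMS(rix); see ath_pkt_duration()
 * and ath_compute_num_delims() below.
 */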
48
49 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
50
51 static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
52 struct ath_atx_tid *tid,
53 struct list_head *bf_head);
54 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar);
57 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head);
59 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
60 static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
61 struct ath_tx_status *ts, int txok);
62 static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
63 int nbad, int txok, bool update_rc);
64
65 enum {
66 MCS_HT20,
67 MCS_HT20_SGI,
68 MCS_HT40,
69 MCS_HT40_SGI,
70 };
71
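/*
 * Added note: each row below gives, per MCS index 0-31, an upper bound (in
 * bytes) on the frame length that fits within a 4 ms transmit duration at
 * that rate, clamped to 65532. ath_lookup_rate() takes the minimum of these
 * values across the rate series to bound the A-MPDU size.
 */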
72 static int ath_max_4ms_framelen[4][32] = {
73 [MCS_HT20] = {
74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
84 },
85 [MCS_HT40] = {
86 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
90 },
91 [MCS_HT40_SGI] = {
92 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
96 }
97 };
98
99 /*********************/
100 /* Aggregation logic */
101 /*********************/
102
103 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104 {
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121 }
122
123 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124 {
125 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
126
127 spin_lock_bh(&txq->axq_lock);
128 tid->paused++;
129 spin_unlock_bh(&txq->axq_lock);
130 }
131
132 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
133 {
134 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
135
136 BUG_ON(tid->paused <= 0);
137 spin_lock_bh(&txq->axq_lock);
138
139 tid->paused--;
140
141 if (tid->paused > 0)
142 goto unlock;
143
144 if (list_empty(&tid->buf_q))
145 goto unlock;
146
147 ath_tx_queue_tid(txq, tid);
148 ath_txq_schedule(sc, txq);
149 unlock:
150 spin_unlock_bh(&txq->axq_lock);
151 }
152
153 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
154 {
155 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
156 struct ath_buf *bf;
157 struct list_head bf_head;
158 INIT_LIST_HEAD(&bf_head);
159
160 BUG_ON(tid->paused <= 0);
161 spin_lock_bh(&txq->axq_lock);
162
163 tid->paused--;
164
165 if (tid->paused > 0) {
166 spin_unlock_bh(&txq->axq_lock);
167 return;
168 }
169
170 while (!list_empty(&tid->buf_q)) {
171 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
172 BUG_ON(bf_isretried(bf));
173 list_move_tail(&bf->list, &bf_head);
174 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
175 }
176
177 spin_unlock_bh(&txq->axq_lock);
178 }
179
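/*
 * Added summary: mark the slot for @seqno in the tid's block-ack window as
 * completed and slide the window start (seq_start/baw_head) forward past any
 * leading slots that have already completed.
 */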
180 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
181 int seqno)
182 {
183 int index, cindex;
184
185 index = ATH_BA_INDEX(tid->seq_start, seqno);
186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
187
188 tid->tx_buf[cindex] = NULL;
189
190 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
191 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
192 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
193 }
194 }
195
196 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
197 struct ath_buf *bf)
198 {
199 int index, cindex;
200
201 if (bf_isretried(bf))
202 return;
203
204 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
205 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
206
207 BUG_ON(tid->tx_buf[cindex] != NULL);
208 tid->tx_buf[cindex] = bf;
209
210 if (index >= ((tid->baw_tail - tid->baw_head) &
211 (ATH_TID_MAX_BUFS - 1))) {
212 tid->baw_tail = cindex;
213 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
214 }
215 }
216
217 /*
218 * TODO: For frame(s) that are in the retry state, we will reuse the
219 * sequence number(s) without setting the retry bit. The
220 * alternative is to give up on these and BAR the receiver's window
221 * forward.
222 */
223 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
224 struct ath_atx_tid *tid)
225
226 {
227 struct ath_buf *bf;
228 struct list_head bf_head;
229 struct ath_tx_status ts;
230
231 memset(&ts, 0, sizeof(ts));
232 INIT_LIST_HEAD(&bf_head);
233
234 for (;;) {
235 if (list_empty(&tid->buf_q))
236 break;
237
238 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
239 list_move_tail(&bf->list, &bf_head);
240
241 if (bf_isretried(bf))
242 ath_tx_update_baw(sc, tid, bf->bf_seqno);
243
244 spin_unlock(&txq->axq_lock);
245 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
246 spin_lock(&txq->axq_lock);
247 }
248
249 tid->seq_next = tid->seq_start;
250 tid->baw_tail = tid->baw_head;
251 }
252
253 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
254 struct ath_buf *bf)
255 {
256 struct sk_buff *skb;
257 struct ieee80211_hdr *hdr;
258
259 bf->bf_state.bf_type |= BUF_RETRY;
260 bf->bf_retries++;
261 TX_STAT_INC(txq->axq_qnum, a_retries);
262
263 skb = bf->bf_mpdu;
264 hdr = (struct ieee80211_hdr *)skb->data;
265 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
266 }
267
268 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
269 {
270 struct ath_buf *bf = NULL;
271
272 spin_lock_bh(&sc->tx.txbuflock);
273
274 if (unlikely(list_empty(&sc->tx.txbuf))) {
275 spin_unlock_bh(&sc->tx.txbuflock);
276 return NULL;
277 }
278
279 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
280 list_del(&bf->list);
281
282 spin_unlock_bh(&sc->tx.txbuflock);
283
284 return bf;
285 }
286
287 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
288 {
289 spin_lock_bh(&sc->tx.txbuflock);
290 list_add_tail(&bf->list, &sc->tx.txbuf);
291 spin_unlock_bh(&sc->tx.txbuflock);
292 }
293
294 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
295 {
296 struct ath_buf *tbf;
297
298 tbf = ath_tx_get_buffer(sc);
299 if (WARN_ON(!tbf))
300 return NULL;
301
302 ATH_TXBUF_RESET(tbf);
303
304 tbf->aphy = bf->aphy;
305 tbf->bf_mpdu = bf->bf_mpdu;
306 tbf->bf_buf_addr = bf->bf_buf_addr;
307 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
308 tbf->bf_state = bf->bf_state;
309 tbf->bf_dmacontext = bf->bf_dmacontext;
310
311 return tbf;
312 }
313
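/*
 * Added summary: post-process a completed aggregate. Walk the chain of
 * subframes, use the block-ack bitmap from the tx status to classify each
 * subframe as acked, failed or pending, complete the acked/xretried ones
 * (updating the BAW), software-retry the un-acked ones by splicing them back
 * onto the tid queue, and handle the AGGR_CLEANUP teardown path.
 */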
314 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
315 struct ath_buf *bf, struct list_head *bf_q,
316 struct ath_tx_status *ts, int txok)
317 {
318 struct ath_node *an = NULL;
319 struct sk_buff *skb;
320 struct ieee80211_sta *sta;
321 struct ieee80211_hw *hw;
322 struct ieee80211_hdr *hdr;
323 struct ieee80211_tx_info *tx_info;
324 struct ath_atx_tid *tid = NULL;
325 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
326 struct list_head bf_head, bf_pending;
327 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
328 u32 ba[WME_BA_BMP_SIZE >> 5];
329 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
330 bool rc_update = true;
331 struct ieee80211_tx_rate rates[4];
332 unsigned long flags;
333
334 skb = bf->bf_mpdu;
335 hdr = (struct ieee80211_hdr *)skb->data;
336
337 tx_info = IEEE80211_SKB_CB(skb);
338 hw = bf->aphy->hw;
339
340 memcpy(rates, tx_info->control.rates, sizeof(rates));
341
342 rcu_read_lock();
343
344 /* XXX: use ieee80211_find_sta! */
345 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
346 if (!sta) {
347 rcu_read_unlock();
348
349 spin_lock_irqsave(&sc->tx.txbuflock, flags);
350 list_splice_tail_init(bf_q, &sc->tx.txbuf);
351 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
352 return;
353 }
354
355 an = (struct ath_node *)sta->drv_priv;
356 tid = ATH_AN_2_TID(an, bf->bf_tidno);
357
358 /*
359 * The hardware occasionally sends a tx status for the wrong TID.
360 * In this case, the BA status cannot be considered valid and all
361 * subframes need to be retransmitted
362 */
363 if (bf->bf_tidno != ts->tid)
364 txok = false;
365
366 isaggr = bf_isaggr(bf);
367 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
368
369 if (isaggr && txok) {
370 if (ts->ts_flags & ATH9K_TX_BA) {
371 seq_st = ts->ts_seqnum;
372 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
373 } else {
374 /*
375 * AR5416 can become deaf/mute when BA
376 * issue happens. Chip needs to be reset.
377 * But AP code may have synchronization issues
378 * when performing an internal reset in this routine.
379 * Only enable reset in STA mode for now.
380 */
381 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
382 needreset = 1;
383 }
384 }
385
386 INIT_LIST_HEAD(&bf_pending);
387 INIT_LIST_HEAD(&bf_head);
388
389 nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
390 while (bf) {
391 txfail = txpending = 0;
392 bf_next = bf->bf_next;
393
394 skb = bf->bf_mpdu;
395 tx_info = IEEE80211_SKB_CB(skb);
396
397 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
398 /* transmit completion, subframe is
399 * acked by block ack */
400 acked_cnt++;
401 } else if (!isaggr && txok) {
402 /* transmit completion */
403 acked_cnt++;
404 } else {
405 if (!(tid->state & AGGR_CLEANUP) &&
406 !bf_last->bf_tx_aborted) {
407 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
408 ath_tx_set_retry(sc, txq, bf);
409 txpending = 1;
410 } else {
411 bf->bf_state.bf_type |= BUF_XRETRY;
412 txfail = 1;
413 sendbar = 1;
414 txfail_cnt++;
415 }
416 } else {
417 /*
418 * cleanup in progress, just fail
419 * the un-acked sub-frames
420 */
421 txfail = 1;
422 }
423 }
424
425 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
426 bf_next == NULL) {
427 /*
428 * Make sure the last desc is reclaimed if it is
429 * not a holding desc.
430 */
431 if (!bf_last->bf_stale)
432 list_move_tail(&bf->list, &bf_head);
433 else
434 INIT_LIST_HEAD(&bf_head);
435 } else {
436 BUG_ON(list_empty(bf_q));
437 list_move_tail(&bf->list, &bf_head);
438 }
439
440 if (!txpending) {
441 /*
442 * complete the acked-ones/xretried ones; update
443 * block-ack window
444 */
445 spin_lock_bh(&txq->axq_lock);
446 ath_tx_update_baw(sc, tid, bf->bf_seqno);
447 spin_unlock_bh(&txq->axq_lock);
448
449 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
450 memcpy(tx_info->control.rates, rates, sizeof(rates));
451 ath_tx_rc_status(bf, ts, nbad, txok, true);
452 rc_update = false;
453 } else {
454 ath_tx_rc_status(bf, ts, nbad, txok, false);
455 }
456
457 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
458 !txfail, sendbar);
459 } else {
460 /* retry the un-acked ones */
461 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
462 if (bf->bf_next == NULL && bf_last->bf_stale) {
463 struct ath_buf *tbf;
464
465 tbf = ath_clone_txbuf(sc, bf_last);
466 /*
467 * Update tx baw and complete the
468 * frame with failed status if we
469 * run out of tx buf.
470 */
471 if (!tbf) {
472 spin_lock_bh(&txq->axq_lock);
473 ath_tx_update_baw(sc, tid,
474 bf->bf_seqno);
475 spin_unlock_bh(&txq->axq_lock);
476
477 bf->bf_state.bf_type |=
478 BUF_XRETRY;
479 ath_tx_rc_status(bf, ts, nbad,
480 0, false);
481 ath_tx_complete_buf(sc, bf, txq,
482 &bf_head,
483 ts, 0, 0);
484 break;
485 }
486
487 ath9k_hw_cleartxdesc(sc->sc_ah,
488 tbf->bf_desc);
489 list_add_tail(&tbf->list, &bf_head);
490 } else {
491 /*
492 * Clear descriptor status words for
493 * software retry
494 */
495 ath9k_hw_cleartxdesc(sc->sc_ah,
496 bf->bf_desc);
497 }
498 }
499
500 /*
501 * Put this buffer on the temporary pending
502 * queue to retain ordering
503 */
504 list_splice_tail_init(&bf_head, &bf_pending);
505 }
506
507 bf = bf_next;
508 }
509
510 if (tid->state & AGGR_CLEANUP) {
511 if (tid->baw_head == tid->baw_tail) {
512 tid->state &= ~AGGR_ADDBA_COMPLETE;
513 tid->state &= ~AGGR_CLEANUP;
514
515 /* send buffered frames as singles */
516 ath_tx_flush_tid(sc, tid);
517 }
518 rcu_read_unlock();
519 return;
520 }
521
522 /* prepend un-acked frames to the beginning of the pending frame queue */
523 if (!list_empty(&bf_pending)) {
524 spin_lock_bh(&txq->axq_lock);
525 list_splice(&bf_pending, &tid->buf_q);
526 ath_tx_queue_tid(txq, tid);
527 spin_unlock_bh(&txq->axq_lock);
528 }
529
530 rcu_read_unlock();
531
532 if (needreset)
533 ath_reset(sc, false);
534 }
535
536 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
537 struct ath_atx_tid *tid)
538 {
539 struct sk_buff *skb;
540 struct ieee80211_tx_info *tx_info;
541 struct ieee80211_tx_rate *rates;
542 u32 max_4ms_framelen, frmlen;
543 u16 aggr_limit, legacy = 0;
544 int i;
545
546 skb = bf->bf_mpdu;
547 tx_info = IEEE80211_SKB_CB(skb);
548 rates = tx_info->control.rates;
549
550 /*
551 * Find the lowest frame length among the rate series that will have a
552 * 4ms transmit duration.
553 * TODO - TXOP limit needs to be considered.
554 */
555 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
556
557 for (i = 0; i < 4; i++) {
558 if (rates[i].count) {
559 int modeidx;
560 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
561 legacy = 1;
562 break;
563 }
564
565 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
566 modeidx = MCS_HT40;
567 else
568 modeidx = MCS_HT20;
569
570 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
571 modeidx++;
572
573 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
574 max_4ms_framelen = min(max_4ms_framelen, frmlen);
575 }
576 }
577
578 /*
579 * Limit aggregate size by the minimum rate if the selected rate is
580 * not a probe rate; if the selected rate is a probe rate, then
581 * avoid aggregating this packet altogether.
582 */
583 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
584 return 0;
585
586 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
587 aggr_limit = min((max_4ms_framelen * 3) / 8,
588 (u32)ATH_AMPDU_LIMIT_MAX);
589 else
590 aggr_limit = min(max_4ms_framelen,
591 (u32)ATH_AMPDU_LIMIT_MAX);
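/*
 * Worked example (illustrative): for a single-rate series at HT20 MCS 7,
 * max_4ms_framelen = 32172 from the table above, so without BT priority
 * aggr_limit = min(32172, ATH_AMPDU_LIMIT_MAX), and with BT priority
 * detected aggr_limit = min(32172 * 3 / 8, ATH_AMPDU_LIMIT_MAX) = 12064
 * (assuming ATH_AMPDU_LIMIT_MAX is the 65535-byte hardware limit noted
 * below).
 */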
592
593 /*
594 * h/w can accept aggregates up to 16 bit lengths (65535).
595 * The IE, however, can hold up to 65536, which shows up here
596 * as zero. Ignore 65536 since we are constrained by hw.
597 */
598 if (tid->an->maxampdu)
599 aggr_limit = min(aggr_limit, tid->an->maxampdu);
600
601 return aggr_limit;
602 }
603
604 /*
605 * Returns the number of delimiters to be added to
606 * meet the minimum required mpdudensity.
607 */
608 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
609 struct ath_buf *bf, u16 frmlen)
610 {
611 struct sk_buff *skb = bf->bf_mpdu;
612 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
613 u32 nsymbits, nsymbols;
614 u16 minlen;
615 u8 flags, rix;
616 int width, streams, half_gi, ndelim, mindelim;
617
618 /* Select standard number of delimiters based on frame length alone */
619 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
620
621 /*
622 * If encryption is enabled, the hardware requires some more padding between
623 * subframes.
624 * TODO - this could be improved to be dependent on the rate.
625 * The hardware can keep up at lower rates, but not higher rates
626 */
627 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
628 ndelim += ATH_AGGR_ENCRYPTDELIM;
629
630 /*
631 * Convert the desired mpdu density from microseconds to bytes based
632 * on the highest rate in the rate series (i.e. the first rate) to
633 * determine the required minimum length for a subframe. Take into
634 * account whether the high rate is 20 or 40 MHz and half or full GI.
635 *
636 * If there is no mpdu density restriction, no further calculation
637 * is needed.
638 */
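/*
 * Worked example (illustrative): with an mpdu density of 8 us and a first
 * rate of HT20 MCS 7 with full GI, nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2,
 * streams = 1, nsymbits = bits_per_symbol[7][0] * 1 = 260, so
 * minlen = (2 * 260) / 8 = 65 bytes; a 40-byte subframe would then need
 * mindelim = (65 - 40) / ATH_AGGR_DELIM_SZ extra delimiters.
 */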
639
640 if (tid->an->mpdudensity == 0)
641 return ndelim;
642
643 rix = tx_info->control.rates[0].idx;
644 flags = tx_info->control.rates[0].flags;
645 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
646 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
647
648 if (half_gi)
649 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
650 else
651 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
652
653 if (nsymbols == 0)
654 nsymbols = 1;
655
656 streams = HT_RC_2_STREAMS(rix);
657 nsymbits = bits_per_symbol[rix % 8][width] * streams;
658 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
659
660 if (frmlen < minlen) {
661 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
662 ndelim = max(mindelim, ndelim);
663 }
664
665 return ndelim;
666 }
667
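/*
 * Added summary: pull frames from the tid's software queue into bf_q to form
 * one aggregate. Stop when a frame would step over the block-ack window
 * (ATH_AGGR_BAW_CLOSED), when the rate-derived byte limit or the subframe
 * limit of min(h_baw, ATH_AMPDU_SUBFRAME_DEFAULT) is hit (ATH_AGGR_LIMITED),
 * or when the queue empties (ATH_AGGR_DONE). Each subframe is added to the
 * BAW and its descriptor linked to the previous one; the first buffer records
 * the total aggregate length (bf_al) and subframe count (bf_nframes).
 */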
668 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
669 struct ath_txq *txq,
670 struct ath_atx_tid *tid,
671 struct list_head *bf_q)
672 {
673 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
674 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
675 int rl = 0, nframes = 0, ndelim, prev_al = 0;
676 u16 aggr_limit = 0, al = 0, bpad = 0,
677 al_delta, h_baw = tid->baw_size / 2;
678 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
679
680 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
681
682 do {
683 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
684
685 /* do not step over block-ack window */
686 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
687 status = ATH_AGGR_BAW_CLOSED;
688 break;
689 }
690
691 if (!rl) {
692 aggr_limit = ath_lookup_rate(sc, bf, tid);
693 rl = 1;
694 }
695
696 /* do not exceed aggregation limit */
697 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
698
699 if (nframes &&
700 (aggr_limit < (al + bpad + al_delta + prev_al))) {
701 status = ATH_AGGR_LIMITED;
702 break;
703 }
704
705 /* do not exceed subframe limit */
706 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
707 status = ATH_AGGR_LIMITED;
708 break;
709 }
710 nframes++;
711
712 /* add padding for previous frame to aggregation length */
713 al += bpad + al_delta;
714
715 /*
716 * Get the delimiters needed to meet the MPDU
717 * density for this node.
718 */
719 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
720 bpad = PADBYTES(al_delta) + (ndelim << 2);
721
722 bf->bf_next = NULL;
723 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
724
725 /* link buffers of this frame to the aggregate */
726 ath_tx_addto_baw(sc, tid, bf);
727 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
728 list_move_tail(&bf->list, bf_q);
729 if (bf_prev) {
730 bf_prev->bf_next = bf;
731 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
732 bf->bf_daddr);
733 }
734 bf_prev = bf;
735
736 } while (!list_empty(&tid->buf_q));
737
738 bf_first->bf_al = al;
739 bf_first->bf_nframes = nframes;
740
741 return status;
742 #undef PADBYTES
743 }
744
745 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
746 struct ath_atx_tid *tid)
747 {
748 struct ath_buf *bf;
749 enum ATH_AGGR_STATUS status;
750 struct list_head bf_q;
751
752 do {
753 if (list_empty(&tid->buf_q))
754 return;
755
756 INIT_LIST_HEAD(&bf_q);
757
758 status = ath_tx_form_aggr(sc, txq, tid, &bf_q);
759
760 /*
761 * no frames picked up to be aggregated;
762 * block-ack window is not open.
763 */
764 if (list_empty(&bf_q))
765 break;
766
767 bf = list_first_entry(&bf_q, struct ath_buf, list);
768 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
769
770 /* if only one frame, send as non-aggregate */
771 if (bf->bf_nframes == 1) {
772 bf->bf_state.bf_type &= ~BUF_AGGR;
773 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
774 ath_buf_set_rate(sc, bf);
775 ath_tx_txqaddbuf(sc, txq, &bf_q);
776 continue;
777 }
778
779 /* setup first desc of aggregate */
780 bf->bf_state.bf_type |= BUF_AGGR;
781 ath_buf_set_rate(sc, bf);
782 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
783
784 /* anchor last desc of aggregate */
785 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
786
787 ath_tx_txqaddbuf(sc, txq, &bf_q);
788 TX_STAT_INC(txq->axq_qnum, a_aggr);
789
790 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
791 status != ATH_AGGR_BAW_CLOSED);
792 }
793
794 void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
795 u16 tid, u16 *ssn)
796 {
797 struct ath_atx_tid *txtid;
798 struct ath_node *an;
799
800 an = (struct ath_node *)sta->drv_priv;
801 txtid = ATH_AN_2_TID(an, tid);
802 txtid->state |= AGGR_ADDBA_PROGRESS;
803 ath_tx_pause_tid(sc, txtid);
804 *ssn = txtid->seq_start;
805 }
806
807 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
808 {
809 struct ath_node *an = (struct ath_node *)sta->drv_priv;
810 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
811 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
812 struct ath_tx_status ts;
813 struct ath_buf *bf;
814 struct list_head bf_head;
815
816 memset(&ts, 0, sizeof(ts));
817 INIT_LIST_HEAD(&bf_head);
818
819 if (txtid->state & AGGR_CLEANUP)
820 return;
821
822 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
823 txtid->state &= ~AGGR_ADDBA_PROGRESS;
824 return;
825 }
826
827 ath_tx_pause_tid(sc, txtid);
828
829 /* drop all software retried frames and mark this TID */
830 spin_lock_bh(&txq->axq_lock);
831 while (!list_empty(&txtid->buf_q)) {
832 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
833 if (!bf_isretried(bf)) {
834 /*
835 * NB: this is based on the assumption that a
836 * software-retried frame will always stay
837 * at the head of the software queue.
838 */
839 break;
840 }
841 list_move_tail(&bf->list, &bf_head);
842 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
843 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
844 }
845 spin_unlock_bh(&txq->axq_lock);
846
847 if (txtid->baw_head != txtid->baw_tail) {
848 txtid->state |= AGGR_CLEANUP;
849 } else {
850 txtid->state &= ~AGGR_ADDBA_COMPLETE;
851 ath_tx_flush_tid(sc, txtid);
852 }
853 }
854
855 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
856 {
857 struct ath_atx_tid *txtid;
858 struct ath_node *an;
859
860 an = (struct ath_node *)sta->drv_priv;
861
862 if (sc->sc_flags & SC_OP_TXAGGR) {
863 txtid = ATH_AN_2_TID(an, tid);
864 txtid->baw_size =
865 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
866 txtid->state |= AGGR_ADDBA_COMPLETE;
867 txtid->state &= ~AGGR_ADDBA_PROGRESS;
868 ath_tx_resume_tid(sc, txtid);
869 }
870 }
871
872 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
873 {
874 struct ath_atx_tid *txtid;
875
876 if (!(sc->sc_flags & SC_OP_TXAGGR))
877 return false;
878
879 txtid = ATH_AN_2_TID(an, tidno);
880
881 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
882 return true;
883 return false;
884 }
885
886 /********************/
887 /* Queue Management */
888 /********************/
889
890 static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
891 struct ath_txq *txq)
892 {
893 struct ath_atx_ac *ac, *ac_tmp;
894 struct ath_atx_tid *tid, *tid_tmp;
895
896 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
897 list_del(&ac->list);
898 ac->sched = false;
899 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
900 list_del(&tid->list);
901 tid->sched = false;
902 ath_tid_drain(sc, txq, tid);
903 }
904 }
905 }
906
907 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
908 {
909 struct ath_hw *ah = sc->sc_ah;
910 struct ath_common *common = ath9k_hw_common(ah);
911 struct ath9k_tx_queue_info qi;
912 int qnum, i;
913
914 memset(&qi, 0, sizeof(qi));
915 qi.tqi_subtype = subtype;
916 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
917 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
918 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
919 qi.tqi_physCompBuf = 0;
920
921 /*
922 * Enable interrupts only for EOL and DESC conditions.
923 * We mark tx descriptors to receive a DESC interrupt
924 * when a tx queue gets deep; otherwise we wait for the
925 * EOL to reap descriptors. Note that this is done to
926 * reduce interrupt load, and this only defers reaping
927 * descriptors, never transmitting frames. Aside from
928 * reducing interrupts this also permits more concurrency.
929 * The only potential downside is if the tx queue backs
930 * up, in which case the top half of the kernel may back up
931 * due to a lack of tx descriptors.
932 *
933 * The UAPSD queue is an exception, since we take a desc-
934 * based intr on the EOSP frames.
935 */
936 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
937 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
938 TXQ_FLAG_TXERRINT_ENABLE;
939 } else {
940 if (qtype == ATH9K_TX_QUEUE_UAPSD)
941 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
942 else
943 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
944 TXQ_FLAG_TXDESCINT_ENABLE;
945 }
946 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
947 if (qnum == -1) {
948 /*
949 * NB: don't print a message, this happens
950 * normally on parts with too few tx queues
951 */
952 return NULL;
953 }
954 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
955 ath_print(common, ATH_DBG_FATAL,
956 "qnum %u out of range, max %u!\n",
957 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
958 ath9k_hw_releasetxqueue(ah, qnum);
959 return NULL;
960 }
961 if (!ATH_TXQ_SETUP(sc, qnum)) {
962 struct ath_txq *txq = &sc->tx.txq[qnum];
963
964 txq->axq_class = subtype;
965 txq->axq_qnum = qnum;
966 txq->axq_link = NULL;
967 INIT_LIST_HEAD(&txq->axq_q);
968 INIT_LIST_HEAD(&txq->axq_acq);
969 spin_lock_init(&txq->axq_lock);
970 txq->axq_depth = 0;
971 txq->axq_tx_inprogress = false;
972 sc->tx.txqsetup |= 1<<qnum;
973
974 txq->txq_headidx = txq->txq_tailidx = 0;
975 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
976 INIT_LIST_HEAD(&txq->txq_fifo[i]);
977 INIT_LIST_HEAD(&txq->txq_fifo_pending);
978 }
979 return &sc->tx.txq[qnum];
980 }
981
982 int ath_txq_update(struct ath_softc *sc, int qnum,
983 struct ath9k_tx_queue_info *qinfo)
984 {
985 struct ath_hw *ah = sc->sc_ah;
986 int error = 0;
987 struct ath9k_tx_queue_info qi;
988
989 if (qnum == sc->beacon.beaconq) {
990 /*
991 * XXX: for beacon queue, we just save the parameter.
992 * It will be picked up by ath_beaconq_config when
993 * it's necessary.
994 */
995 sc->beacon.beacon_qi = *qinfo;
996 return 0;
997 }
998
999 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
1000
1001 ath9k_hw_get_txq_props(ah, qnum, &qi);
1002 qi.tqi_aifs = qinfo->tqi_aifs;
1003 qi.tqi_cwmin = qinfo->tqi_cwmin;
1004 qi.tqi_cwmax = qinfo->tqi_cwmax;
1005 qi.tqi_burstTime = qinfo->tqi_burstTime;
1006 qi.tqi_readyTime = qinfo->tqi_readyTime;
1007
1008 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1009 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1010 "Unable to update hardware queue %u!\n", qnum);
1011 error = -EIO;
1012 } else {
1013 ath9k_hw_resettxqueue(ah, qnum);
1014 }
1015
1016 return error;
1017 }
1018
1019 int ath_cabq_update(struct ath_softc *sc)
1020 {
1021 struct ath9k_tx_queue_info qi;
1022 int qnum = sc->beacon.cabq->axq_qnum;
1023
1024 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1025 /*
1026 * Ensure the readytime % is within the bounds.
1027 */
1028 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1029 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1030 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1031 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1032
1033 qi.tqi_readyTime = (sc->beacon_interval *
1034 sc->config.cabqReadytime) / 100;
1035 ath_txq_update(sc, qnum, &qi);
1036
1037 return 0;
1038 }
1039
1040 /*
1041 * Drain a given TX queue (could be Beacon or Data)
1042 *
1043 * This assumes output has been stopped and
1044 * we do not need to block ath_tx_tasklet.
1045 */
1046 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1047 {
1048 struct ath_buf *bf, *lastbf;
1049 struct list_head bf_head;
1050 struct ath_tx_status ts;
1051
1052 memset(&ts, 0, sizeof(ts));
1053 INIT_LIST_HEAD(&bf_head);
1054
1055 for (;;) {
1056 spin_lock_bh(&txq->axq_lock);
1057
1058 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1059 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1060 txq->txq_headidx = txq->txq_tailidx = 0;
1061 spin_unlock_bh(&txq->axq_lock);
1062 break;
1063 } else {
1064 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1065 struct ath_buf, list);
1066 }
1067 } else {
1068 if (list_empty(&txq->axq_q)) {
1069 txq->axq_link = NULL;
1070 spin_unlock_bh(&txq->axq_lock);
1071 break;
1072 }
1073 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1074 list);
1075
1076 if (bf->bf_stale) {
1077 list_del(&bf->list);
1078 spin_unlock_bh(&txq->axq_lock);
1079
1080 ath_tx_return_buffer(sc, bf);
1081 continue;
1082 }
1083 }
1084
1085 lastbf = bf->bf_lastbf;
1086 if (!retry_tx)
1087 lastbf->bf_tx_aborted = true;
1088
1089 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1090 list_cut_position(&bf_head,
1091 &txq->txq_fifo[txq->txq_tailidx],
1092 &lastbf->list);
1093 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1094 } else {
1095 /* remove ath_buf's of the same mpdu from txq */
1096 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1097 }
1098
1099 txq->axq_depth--;
1100
1101 spin_unlock_bh(&txq->axq_lock);
1102
1103 if (bf_isampdu(bf))
1104 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
1105 else
1106 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1107 }
1108
1109 spin_lock_bh(&txq->axq_lock);
1110 txq->axq_tx_inprogress = false;
1111 spin_unlock_bh(&txq->axq_lock);
1112
1113 /* flush any pending frames if aggregation is enabled */
1114 if (sc->sc_flags & SC_OP_TXAGGR) {
1115 if (!retry_tx) {
1116 spin_lock_bh(&txq->axq_lock);
1117 ath_txq_drain_pending_buffers(sc, txq);
1118 spin_unlock_bh(&txq->axq_lock);
1119 }
1120 }
1121
1122 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1123 spin_lock_bh(&txq->axq_lock);
1124 while (!list_empty(&txq->txq_fifo_pending)) {
1125 bf = list_first_entry(&txq->txq_fifo_pending,
1126 struct ath_buf, list);
1127 list_cut_position(&bf_head,
1128 &txq->txq_fifo_pending,
1129 &bf->bf_lastbf->list);
1130 spin_unlock_bh(&txq->axq_lock);
1131
1132 if (bf_isampdu(bf))
1133 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1134 &ts, 0);
1135 else
1136 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1137 &ts, 0, 0);
1138 spin_lock_bh(&txq->axq_lock);
1139 }
1140 spin_unlock_bh(&txq->axq_lock);
1141 }
1142 }
1143
1144 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1145 {
1146 struct ath_hw *ah = sc->sc_ah;
1147 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1148 struct ath_txq *txq;
1149 int i, npend = 0;
1150
1151 if (sc->sc_flags & SC_OP_INVALID)
1152 return;
1153
1154 /* Stop beacon queue */
1155 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1156
1157 /* Stop data queues */
1158 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1159 if (ATH_TXQ_SETUP(sc, i)) {
1160 txq = &sc->tx.txq[i];
1161 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1162 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1163 }
1164 }
1165
1166 if (npend) {
1167 int r;
1168
1169 ath_print(common, ATH_DBG_FATAL,
1170 "Failed to stop TX DMA. Resetting hardware!\n");
1171
1172 spin_lock_bh(&sc->sc_resetlock);
1173 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1174 if (r)
1175 ath_print(common, ATH_DBG_FATAL,
1176 "Unable to reset hardware; reset status %d\n",
1177 r);
1178 spin_unlock_bh(&sc->sc_resetlock);
1179 }
1180
1181 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1182 if (ATH_TXQ_SETUP(sc, i))
1183 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1184 }
1185 }
1186
1187 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1188 {
1189 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1190 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1191 }
1192
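/*
 * Added summary: take the first access category off the txq's round-robin
 * list and let its first un-paused tid schedule aggregates; the tid is
 * requeued if it still has buffered frames, and the AC is requeued if it
 * still has pending tids.
 */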
1193 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1194 {
1195 struct ath_atx_ac *ac;
1196 struct ath_atx_tid *tid;
1197
1198 if (list_empty(&txq->axq_acq))
1199 return;
1200
1201 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1202 list_del(&ac->list);
1203 ac->sched = false;
1204
1205 do {
1206 if (list_empty(&ac->tid_q))
1207 return;
1208
1209 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1210 list_del(&tid->list);
1211 tid->sched = false;
1212
1213 if (tid->paused)
1214 continue;
1215
1216 ath_tx_sched_aggr(sc, txq, tid);
1217
1218 /*
1219 * add tid to round-robin queue if more frames
1220 * are pending for the tid
1221 */
1222 if (!list_empty(&tid->buf_q))
1223 ath_tx_queue_tid(txq, tid);
1224
1225 break;
1226 } while (!list_empty(&ac->tid_q));
1227
1228 if (!list_empty(&ac->tid_q)) {
1229 if (!ac->sched) {
1230 ac->sched = true;
1231 list_add_tail(&ac->list, &txq->axq_acq);
1232 }
1233 }
1234 }
1235
1236 int ath_tx_setup(struct ath_softc *sc, int haltype)
1237 {
1238 struct ath_txq *txq;
1239
1240 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1241 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1242 "HAL AC %u out of range, max %zu!\n",
1243 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1244 return 0;
1245 }
1246 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1247 if (txq != NULL) {
1248 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1249 return 1;
1250 } else
1251 return 0;
1252 }
1253
1254 /***********/
1255 /* TX, DMA */
1256 /***********/
1257
1258 /*
1259 * Insert a chain of ath_buf (descriptors) on a txq and
1260 * assume the descriptors are already chained together by the caller.
1261 */
1262 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1263 struct list_head *head)
1264 {
1265 struct ath_hw *ah = sc->sc_ah;
1266 struct ath_common *common = ath9k_hw_common(ah);
1267 struct ath_buf *bf;
1268
1269 /*
1270 * Insert the frame on the outbound list and
1271 * pass it on to the hardware.
1272 */
1273
1274 if (list_empty(head))
1275 return;
1276
1277 bf = list_first_entry(head, struct ath_buf, list);
1278
1279 ath_print(common, ATH_DBG_QUEUE,
1280 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1281
1282 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1283 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1284 list_splice_tail_init(head, &txq->txq_fifo_pending);
1285 return;
1286 }
1287 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1288 ath_print(common, ATH_DBG_XMIT,
1289 "Initializing tx fifo %d which "
1290 "is non-empty\n",
1291 txq->txq_headidx);
1292 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1293 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1294 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1295 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1296 ath_print(common, ATH_DBG_XMIT,
1297 "TXDP[%u] = %llx (%p)\n",
1298 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1299 } else {
1300 list_splice_tail_init(head, &txq->axq_q);
1301
1302 if (txq->axq_link == NULL) {
1303 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1304 ath_print(common, ATH_DBG_XMIT,
1305 "TXDP[%u] = %llx (%p)\n",
1306 txq->axq_qnum, ito64(bf->bf_daddr),
1307 bf->bf_desc);
1308 } else {
1309 *txq->axq_link = bf->bf_daddr;
1310 ath_print(common, ATH_DBG_XMIT,
1311 "link[%u] (%p)=%llx (%p)\n",
1312 txq->axq_qnum, txq->axq_link,
1313 ito64(bf->bf_daddr), bf->bf_desc);
1314 }
1315 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1316 &txq->axq_link);
1317 ath9k_hw_txstart(ah, txq->axq_qnum);
1318 }
1319 txq->axq_depth++;
1320 }
1321
1322 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1323 struct list_head *bf_head,
1324 struct ath_tx_control *txctl)
1325 {
1326 struct ath_buf *bf;
1327
1328 bf = list_first_entry(bf_head, struct ath_buf, list);
1329 bf->bf_state.bf_type |= BUF_AMPDU;
1330 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
1331
1332 /*
1333 * Do not queue to h/w when any of the following conditions is true:
1334 * - there are pending frames in software queue
1335 * - the TID is currently paused for ADDBA/BAR request
1336 * - seqno is not within block-ack window
1337 * - h/w queue depth exceeds low water mark
1338 */
1339 if (!list_empty(&tid->buf_q) || tid->paused ||
1340 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1341 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1342 /*
1343 * Add this frame to software queue for scheduling later
1344 * for aggregation.
1345 */
1346 list_move_tail(&bf->list, &tid->buf_q);
1347 ath_tx_queue_tid(txctl->txq, tid);
1348 return;
1349 }
1350
1351 /* Add sub-frame to BAW */
1352 ath_tx_addto_baw(sc, tid, bf);
1353
1354 /* Queue to h/w without aggregation */
1355 bf->bf_nframes = 1;
1356 bf->bf_lastbf = bf;
1357 ath_buf_set_rate(sc, bf);
1358 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
1359 }
1360
1361 static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1362 struct ath_atx_tid *tid,
1363 struct list_head *bf_head)
1364 {
1365 struct ath_buf *bf;
1366
1367 bf = list_first_entry(bf_head, struct ath_buf, list);
1368 bf->bf_state.bf_type &= ~BUF_AMPDU;
1369
1370 /* update starting sequence number for subsequent ADDBA request */
1371 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1372
1373 bf->bf_nframes = 1;
1374 bf->bf_lastbf = bf;
1375 ath_buf_set_rate(sc, bf);
1376 ath_tx_txqaddbuf(sc, txq, bf_head);
1377 TX_STAT_INC(txq->axq_qnum, queued);
1378 }
1379
1380 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1381 struct list_head *bf_head)
1382 {
1383 struct ath_buf *bf;
1384
1385 bf = list_first_entry(bf_head, struct ath_buf, list);
1386
1387 bf->bf_lastbf = bf;
1388 bf->bf_nframes = 1;
1389 ath_buf_set_rate(sc, bf);
1390 ath_tx_txqaddbuf(sc, txq, bf_head);
1391 TX_STAT_INC(txq->axq_qnum, queued);
1392 }
1393
1394 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1395 {
1396 struct ieee80211_hdr *hdr;
1397 enum ath9k_pkt_type htype;
1398 __le16 fc;
1399
1400 hdr = (struct ieee80211_hdr *)skb->data;
1401 fc = hdr->frame_control;
1402
1403 if (ieee80211_is_beacon(fc))
1404 htype = ATH9K_PKT_TYPE_BEACON;
1405 else if (ieee80211_is_probe_resp(fc))
1406 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1407 else if (ieee80211_is_atim(fc))
1408 htype = ATH9K_PKT_TYPE_ATIM;
1409 else if (ieee80211_is_pspoll(fc))
1410 htype = ATH9K_PKT_TYPE_PSPOLL;
1411 else
1412 htype = ATH9K_PKT_TYPE_NORMAL;
1413
1414 return htype;
1415 }
1416
1417 static int get_hw_crypto_keytype(struct sk_buff *skb)
1418 {
1419 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1420
1421 if (tx_info->control.hw_key) {
1422 if (tx_info->control.hw_key->alg == ALG_WEP)
1423 return ATH9K_KEY_TYPE_WEP;
1424 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1425 return ATH9K_KEY_TYPE_TKIP;
1426 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1427 return ATH9K_KEY_TYPE_AES;
1428 }
1429
1430 return ATH9K_KEY_TYPE_CLEAR;
1431 }
1432
1433 static void assign_aggr_tid_seqno(struct sk_buff *skb,
1434 struct ath_buf *bf)
1435 {
1436 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1437 struct ieee80211_hdr *hdr;
1438 struct ath_node *an;
1439 struct ath_atx_tid *tid;
1440 __le16 fc;
1441 u8 *qc;
1442
1443 if (!tx_info->control.sta)
1444 return;
1445
1446 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1447 hdr = (struct ieee80211_hdr *)skb->data;
1448 fc = hdr->frame_control;
1449
1450 if (ieee80211_is_data_qos(fc)) {
1451 qc = ieee80211_get_qos_ctl(hdr);
1452 bf->bf_tidno = qc[0] & 0xf;
1453 }
1454
1455 /*
1456 * For HT capable stations, we save the tidno for later use.
1457 * We also override the seqno set by the upper layer with the one
1458 * in the tx aggregation state.
1459 */
1460 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1461 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1462 bf->bf_seqno = tid->seq_next;
1463 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1464 }
1465
1466 static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1467 {
1468 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1469 int flags = 0;
1470
1471 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1472 flags |= ATH9K_TXDESC_INTREQ;
1473
1474 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1475 flags |= ATH9K_TXDESC_NOACK;
1476
1477 if (use_ldpc)
1478 flags |= ATH9K_TXDESC_LDPC;
1479
1480 return flags;
1481 }
1482
1483 /*
1484 * rix - rate index
1485 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1486 * width - 0 for 20 MHz, 1 for 40 MHz
1487 * half_gi - to use 4us v/s 3.6 us for symbol time
1488 */
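/*
 * Worked example (illustrative): a 1500-byte non-aggregate frame at HT20
 * MCS 7 (single stream, full GI) gives nbits = 1500 * 8 + 22 = 12022,
 * nsymbits = 260, nsymbols = 47, so duration = SYMBOL_TIME(47) = 188 us,
 * plus 36 us of legacy/HT preamble and training fields = 224 us total.
 */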
1489 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1490 int width, int half_gi, bool shortPreamble)
1491 {
1492 u32 nbits, nsymbits, duration, nsymbols;
1493 int streams, pktlen;
1494
1495 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1496
1497 /* find number of symbols: PLCP + data */
1498 streams = HT_RC_2_STREAMS(rix);
1499 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1500 nsymbits = bits_per_symbol[rix % 8][width] * streams;
1501 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1502
1503 if (!half_gi)
1504 duration = SYMBOL_TIME(nsymbols);
1505 else
1506 duration = SYMBOL_TIME_HALFGI(nsymbols);
1507
1508 /* add up duration for legacy/ht training and signal fields */
1509 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1510
1511 return duration;
1512 }
1513
1514 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1515 {
1516 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1517 struct ath9k_11n_rate_series series[4];
1518 struct sk_buff *skb;
1519 struct ieee80211_tx_info *tx_info;
1520 struct ieee80211_tx_rate *rates;
1521 const struct ieee80211_rate *rate;
1522 struct ieee80211_hdr *hdr;
1523 int i, flags = 0;
1524 u8 rix = 0, ctsrate = 0;
1525 bool is_pspoll;
1526
1527 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
1528
1529 skb = bf->bf_mpdu;
1530 tx_info = IEEE80211_SKB_CB(skb);
1531 rates = tx_info->control.rates;
1532 hdr = (struct ieee80211_hdr *)skb->data;
1533 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
1534
1535 /*
1536 * We check if Short Preamble is needed for the CTS rate by
1537 * checking the BSS's global flag.
1538 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1539 */
1540 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1541 ctsrate = rate->hw_value;
1542 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
1543 ctsrate |= rate->hw_value_short;
1544
1545 for (i = 0; i < 4; i++) {
1546 bool is_40, is_sgi, is_sp;
1547 int phy;
1548
1549 if (!rates[i].count || (rates[i].idx < 0))
1550 continue;
1551
1552 rix = rates[i].idx;
1553 series[i].Tries = rates[i].count;
1554 series[i].ChSel = common->tx_chainmask;
1555
1556 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1557 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
1558 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1559 flags |= ATH9K_TXDESC_RTSENA;
1560 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1561 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1562 flags |= ATH9K_TXDESC_CTSENA;
1563 }
1564
1565 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1566 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1567 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1568 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
1569
1570 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1571 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1572 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1573
1574 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1575 /* MCS rates */
1576 series[i].Rate = rix | 0x80;
1577 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1578 is_40, is_sgi, is_sp);
1579 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1580 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1581 continue;
1582 }
1583
1584 /* legacy rates */
1585 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1586 !(rate->flags & IEEE80211_RATE_ERP_G))
1587 phy = WLAN_RC_PHY_CCK;
1588 else
1589 phy = WLAN_RC_PHY_OFDM;
1590
1591 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1592 series[i].Rate = rate->hw_value;
1593 if (rate->hw_value_short) {
1594 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1595 series[i].Rate |= rate->hw_value_short;
1596 } else {
1597 is_sp = false;
1598 }
1599
1600 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1601 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
1602 }
1603
1604 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1605 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1606 flags &= ~ATH9K_TXDESC_RTSENA;
1607
1608 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1609 if (flags & ATH9K_TXDESC_RTSENA)
1610 flags &= ~ATH9K_TXDESC_CTSENA;
1611
1612 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1613 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1614 bf->bf_lastbf->bf_desc,
1615 !is_pspoll, ctsrate,
1616 0, series, 4, flags);
1617
1618 if (sc->config.ath_aggr_prot && flags)
1619 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1620 }
1621
1622 static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1623 struct sk_buff *skb,
1624 struct ath_tx_control *txctl)
1625 {
1626 struct ath_wiphy *aphy = hw->priv;
1627 struct ath_softc *sc = aphy->sc;
1628 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1630 int hdrlen;
1631 __le16 fc;
1632 int padpos, padsize;
1633 bool use_ldpc = false;
1634
1635 tx_info->pad[0] = 0;
1636 switch (txctl->frame_type) {
1637 case ATH9K_IFT_NOT_INTERNAL:
1638 break;
1639 case ATH9K_IFT_PAUSE:
1640 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1641 /* fall through */
1642 case ATH9K_IFT_UNPAUSE:
1643 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1644 break;
1645 }
1646 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1647 fc = hdr->frame_control;
1648
1649 ATH_TXBUF_RESET(bf);
1650
1651 bf->aphy = aphy;
1652 bf->bf_frmlen = skb->len + FCS_LEN;
1653 /* Remove the padding size from bf_frmlen, if any */
1654 padpos = ath9k_cmn_padpos(hdr->frame_control);
1655 padsize = padpos & 3;
1656 if (padsize && skb->len>padpos+padsize) {
1657 bf->bf_frmlen -= padsize;
1658 }
1659
1660 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
1661 bf->bf_state.bf_type |= BUF_HT;
1662 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1663 use_ldpc = true;
1664 }
1665
1666 bf->bf_state.bfs_paprd = txctl->paprd;
1667 if (txctl->paprd)
1668 bf->bf_state.bfs_paprd_timestamp = jiffies;
1669 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1670
1671 bf->bf_keytype = get_hw_crypto_keytype(skb);
1672 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1673 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1674 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1675 } else {
1676 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1677 }
1678
1679 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1680 (sc->sc_flags & SC_OP_TXAGGR))
1681 assign_aggr_tid_seqno(skb, bf);
1682
1683 bf->bf_mpdu = skb;
1684
1685 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1686 skb->len, DMA_TO_DEVICE);
1687 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1688 bf->bf_mpdu = NULL;
1689 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1690 "dma_mapping_error() on TX\n");
1691 return -ENOMEM;
1692 }
1693
1694 bf->bf_buf_addr = bf->bf_dmacontext;
1695
1696 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1697 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1698 bf->bf_isnullfunc = true;
1699 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1700 } else
1701 bf->bf_isnullfunc = false;
1702
1703 bf->bf_tx_aborted = false;
1704
1705 return 0;
1706 }
1707
1708 /* FIXME: tx power */
1709 static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1710 struct ath_tx_control *txctl)
1711 {
1712 struct sk_buff *skb = bf->bf_mpdu;
1713 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1715 struct ath_node *an = NULL;
1716 struct list_head bf_head;
1717 struct ath_desc *ds;
1718 struct ath_atx_tid *tid;
1719 struct ath_hw *ah = sc->sc_ah;
1720 int frm_type;
1721 __le16 fc;
1722
1723 frm_type = get_hw_packet_type(skb);
1724 fc = hdr->frame_control;
1725
1726 INIT_LIST_HEAD(&bf_head);
1727 list_add_tail(&bf->list, &bf_head);
1728
1729 ds = bf->bf_desc;
1730 ath9k_hw_set_desc_link(ah, ds, 0);
1731
1732 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1733 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1734
1735 ath9k_hw_filltxdesc(ah, ds,
1736 skb->len, /* segment length */
1737 true, /* first segment */
1738 true, /* last segment */
1739 ds, /* first descriptor */
1740 bf->bf_buf_addr,
1741 txctl->txq->axq_qnum);
1742
1743 if (bf->bf_state.bfs_paprd)
1744 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1745
1746 spin_lock_bh(&txctl->txq->axq_lock);
1747
1748 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1749 tx_info->control.sta) {
1750 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1751 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1752
1753 if (!ieee80211_is_data_qos(fc)) {
1754 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1755 goto tx_done;
1756 }
1757
1758 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1759 /*
1760 * Try aggregation if it's a unicast data frame
1761 * and the destination is HT capable.
1762 */
1763 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1764 } else {
1765 /*
1766 * Send this frame as regular when ADDBA
1767 * exchange is neither complete nor pending.
1768 */
1769 ath_tx_send_ht_normal(sc, txctl->txq,
1770 tid, &bf_head);
1771 }
1772 } else {
1773 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1774 }
1775
1776 tx_done:
1777 spin_unlock_bh(&txctl->txq->axq_lock);
1778 }
1779
1780 /* Upon failure caller should free skb */
1781 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1782 struct ath_tx_control *txctl)
1783 {
1784 struct ath_wiphy *aphy = hw->priv;
1785 struct ath_softc *sc = aphy->sc;
1786 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1787 struct ath_txq *txq = txctl->txq;
1788 struct ath_buf *bf;
1789 int q, r;
1790
1791 bf = ath_tx_get_buffer(sc);
1792 if (!bf) {
1793 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1794 return -1;
1795 }
1796
1797 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
1798 if (unlikely(r)) {
1799 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1800
1801 /* upon ath_tx_processq() this TX queue will be resumed; we
1802 * guarantee this will happen by knowing beforehand that
1803 * we will at least have to run TX completion on one buffer
1804 * on the queue */
1805 spin_lock_bh(&txq->axq_lock);
1806 if (!txq->stopped && txq->axq_depth > 1) {
1807 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1808 txq->stopped = 1;
1809 }
1810 spin_unlock_bh(&txq->axq_lock);
1811
1812 ath_tx_return_buffer(sc, bf);
1813
1814 return r;
1815 }
1816
1817 q = skb_get_queue_mapping(skb);
1818 if (q >= 4)
1819 q = 0;
1820
1821 spin_lock_bh(&txq->axq_lock);
1822 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1823 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1824 txq->stopped = 1;
1825 }
1826 spin_unlock_bh(&txq->axq_lock);
1827
1828 ath_tx_start_dma(sc, bf, txctl);
1829
1830 return 0;
1831 }
1832
1833 void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1834 {
1835 struct ath_wiphy *aphy = hw->priv;
1836 struct ath_softc *sc = aphy->sc;
1837 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1838 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1839 int padpos, padsize;
1840 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1841 struct ath_tx_control txctl;
1842
1843 memset(&txctl, 0, sizeof(struct ath_tx_control));
1844
1845 /*
1846 * As a temporary workaround, assign seq# here; this will likely need
1847 * to be cleaned up to work better with Beacon transmission and virtual
1848 * BSSes.
1849 */
1850 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1851 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1852 sc->tx.seq_no += 0x10;
1853 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1854 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1855 }
1856
1857 /* Add the padding after the header if this is not already done */
1858 padpos = ath9k_cmn_padpos(hdr->frame_control);
1859 padsize = padpos & 3;
1860 if (padsize && skb->len>padpos) {
1861 if (skb_headroom(skb) < padsize) {
1862 ath_print(common, ATH_DBG_XMIT,
1863 "TX CABQ padding failed\n");
1864 dev_kfree_skb_any(skb);
1865 return;
1866 }
1867 skb_push(skb, padsize);
1868 memmove(skb->data, skb->data + padsize, padpos);
1869 }
1870
1871 txctl.txq = sc->beacon.cabq;
1872
1873 ath_print(common, ATH_DBG_XMIT,
1874 "transmitting CABQ packet, skb: %p\n", skb);
1875
1876 if (ath_tx_start(hw, skb, &txctl) != 0) {
1877 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
1878 goto exit;
1879 }
1880
1881 return;
1882 exit:
1883 dev_kfree_skb_any(skb);
1884 }
1885
1886 /*****************/
1887 /* TX Completion */
1888 /*****************/
1889
1890 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1891 struct ath_wiphy *aphy, int tx_flags)
1892 {
1893 struct ieee80211_hw *hw = sc->hw;
1894 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1895 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1896 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1897 int q, padpos, padsize;
1898
1899 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1900
1901 if (aphy)
1902 hw = aphy->hw;
1903
1904 if (tx_flags & ATH_TX_BAR)
1905 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1906
1907 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
1908 /* Frame was ACKed */
1909 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1910 }
1911
1912 padpos = ath9k_cmn_padpos(hdr->frame_control);
1913 padsize = padpos & 3;
1914 if (padsize && skb->len>padpos+padsize) {
1915 /*
1916 * Remove MAC header padding before giving the frame back to
1917 * mac80211.
1918 */
1919 memmove(skb->data + padsize, skb->data, padpos);
1920 skb_pull(skb, padsize);
1921 }
1922
1923 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1924 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1925 ath_print(common, ATH_DBG_PS,
1926 "Going back to sleep after having "
1927 "received TX status (0x%lx)\n",
1928 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1929 PS_WAIT_FOR_CAB |
1930 PS_WAIT_FOR_PSPOLL_DATA |
1931 PS_WAIT_FOR_TX_ACK));
1932 }
1933
1934 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
1935 ath9k_tx_status(hw, skb);
1936 else {
1937 q = skb_get_queue_mapping(skb);
1938 if (q >= 4)
1939 q = 0;
1940
1941 if (--sc->tx.pending_frames[q] < 0)
1942 sc->tx.pending_frames[q] = 0;
1943
1944 ieee80211_tx_status(hw, skb);
1945 }
1946 }
1947
1948 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1949 struct ath_txq *txq, struct list_head *bf_q,
1950 struct ath_tx_status *ts, int txok, int sendbar)
1951 {
1952 struct sk_buff *skb = bf->bf_mpdu;
1953 unsigned long flags;
1954 int tx_flags = 0;
1955
1956 if (sendbar)
1957 tx_flags = ATH_TX_BAR;
1958
1959 if (!txok) {
1960 tx_flags |= ATH_TX_ERROR;
1961
1962 if (bf_isxretried(bf))
1963 tx_flags |= ATH_TX_XRETRY;
1964 }
1965
1966 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1967
1968 if (bf->bf_state.bfs_paprd) {
1969 if (time_after(jiffies,
1970 bf->bf_state.bfs_paprd_timestamp +
1971 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1972 dev_kfree_skb_any(skb);
1973 else
1974 complete(&sc->paprd_complete);
1975 } else {
1976 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1977 ath_debug_stat_tx(sc, txq, bf, ts);
1978 }
1979
1980 /*
1981 	 * Return this MPDU's list of ath_bufs to the free buffer queue
1982 */
1983 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1984 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1985 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1986 }
1987
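/*
 * Count the subframes of this buffer chain that were not acknowledged.
 * For aggregates the block-ack bitmap from the TX status is consulted;
 * for a failed transmission every subframe is counted as bad.
 */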
1988 static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1989 struct ath_tx_status *ts, int txok)
1990 {
1991 u16 seq_st = 0;
1992 u32 ba[WME_BA_BMP_SIZE >> 5];
1993 int ba_index;
1994 int nbad = 0;
1995 int isaggr = 0;
1996
1997 if (bf->bf_lastbf->bf_tx_aborted)
1998 return 0;
1999
2000 isaggr = bf_isaggr(bf);
2001 if (isaggr) {
2002 seq_st = ts->ts_seqnum;
2003 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
2004 }
2005
2006 while (bf) {
2007 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
2008 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
2009 nbad++;
2010
2011 bf = bf->bf_next;
2012 }
2013
2014 return nbad;
2015 }
2016
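/*
 * Translate the hardware TX status into the mac80211 rate-control report:
 * ack RSSI, filtered/underrun/excessive-retry indications, A-MPDU length
 * and ack counts, and the retry count of the rate actually used.
 */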
2017 static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2018 int nbad, int txok, bool update_rc)
2019 {
2020 struct sk_buff *skb = bf->bf_mpdu;
2021 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2022 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2023 struct ieee80211_hw *hw = bf->aphy->hw;
2024 u8 i, tx_rateindex;
2025
2026 if (txok)
2027 tx_info->status.ack_signal = ts->ts_rssi;
2028
2029 tx_rateindex = ts->ts_rateindex;
2030 WARN_ON(tx_rateindex >= hw->max_rates);
2031
2032 if (ts->ts_status & ATH9K_TXERR_FILT)
2033 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2034 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2035 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2036
2037 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2038 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
2039 if (ieee80211_is_data(hdr->frame_control)) {
2040 if (ts->ts_flags &
2041 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2042 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
2043 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2044 (ts->ts_status & ATH9K_TXERR_FIFO))
2045 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2046 tx_info->status.ampdu_len = bf->bf_nframes;
2047 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
2048 }
2049 }
2050
2051 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2052 tx_info->status.rates[i].count = 0;
2053 tx_info->status.rates[i].idx = -1;
2054 }
2055
2056 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2057 }
2058
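/*
 * Restart a stopped mac80211 queue once the number of frames pending on
 * the corresponding hardware queue has dropped below ATH_MAX_QDEPTH.
 */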
2059 static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2060 {
2061 int qnum;
2062
2063 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2064 if (qnum == -1)
2065 return;
2066
2067 spin_lock_bh(&txq->axq_lock);
2068 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2069 ath_mac80211_start_queue(sc, qnum);
2070 txq->stopped = 0;
2071 }
2072 spin_unlock_bh(&txq->axq_lock);
2073 }
2074
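/*
 * Reap completed frames from a hardware TX queue (legacy DMA path).
 * The last DONE descriptor is kept as a holding descriptor (marked
 * stale) to avoid racing with the hardware; completed frames are then
 * handed to the aggregate or single-frame completion path and the
 * corresponding mac80211 queue is woken up again.
 */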
2075 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2076 {
2077 struct ath_hw *ah = sc->sc_ah;
2078 struct ath_common *common = ath9k_hw_common(ah);
2079 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2080 struct list_head bf_head;
2081 struct ath_desc *ds;
2082 struct ath_tx_status ts;
2083 int txok;
2084 int status;
2085
2086 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2087 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2088 txq->axq_link);
2089
2090 for (;;) {
2091 spin_lock_bh(&txq->axq_lock);
2092 if (list_empty(&txq->axq_q)) {
2093 txq->axq_link = NULL;
2094 spin_unlock_bh(&txq->axq_lock);
2095 break;
2096 }
2097 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2098
2099 /*
2100 		 * There is a race condition in which a BH gets scheduled
2101 		 * after sw writes TxE and before hw re-loads the last
2102 		 * descriptor to pick up the newly chained one.
2103 * Software must keep the last DONE descriptor as a
2104 * holding descriptor - software does so by marking
2105 * it with the STALE flag.
2106 */
2107 bf_held = NULL;
2108 if (bf->bf_stale) {
2109 bf_held = bf;
2110 if (list_is_last(&bf_held->list, &txq->axq_q)) {
2111 spin_unlock_bh(&txq->axq_lock);
2112 break;
2113 } else {
2114 bf = list_entry(bf_held->list.next,
2115 struct ath_buf, list);
2116 }
2117 }
2118
2119 lastbf = bf->bf_lastbf;
2120 ds = lastbf->bf_desc;
2121
2122 memset(&ts, 0, sizeof(ts));
2123 status = ath9k_hw_txprocdesc(ah, ds, &ts);
2124 if (status == -EINPROGRESS) {
2125 spin_unlock_bh(&txq->axq_lock);
2126 break;
2127 }
2128
2129 /*
2130 * We now know the nullfunc frame has been ACKed so we
2131 * can disable RX.
2132 */
2133 if (bf->bf_isnullfunc &&
2134 (ts.ts_status & ATH9K_TX_ACKED)) {
2135 			if (sc->ps_flags & PS_ENABLED)
2136 ath9k_enable_ps(sc);
2137 else
2138 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2139 }
2140
2141 /*
2142 		 * Remove the ath_bufs of the same transmit unit from the txq,
2143 		 * but leave the last descriptor behind as the holding
2144 * descriptor for hw.
2145 */
2146 lastbf->bf_stale = true;
2147 INIT_LIST_HEAD(&bf_head);
2148 if (!list_is_singular(&lastbf->list))
2149 list_cut_position(&bf_head,
2150 &txq->axq_q, lastbf->list.prev);
2151
2152 txq->axq_depth--;
2153 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
2154 txq->axq_tx_inprogress = false;
2155 if (bf_held)
2156 list_del(&bf_held->list);
2157 spin_unlock_bh(&txq->axq_lock);
2158
2159 if (bf_held)
2160 ath_tx_return_buffer(sc, bf_held);
2161
2162 if (!bf_isampdu(bf)) {
2163 /*
2164 * This frame is sent out as a single frame.
2165 * Use hardware retry status for this frame.
2166 */
2167 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2168 bf->bf_state.bf_type |= BUF_XRETRY;
2169 ath_tx_rc_status(bf, &ts, 0, txok, true);
2170 }
2171
2172 if (bf_isampdu(bf))
2173 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
2174 else
2175 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2176
2177 ath_wake_mac80211_queue(sc, txq);
2178
2179 spin_lock_bh(&txq->axq_lock);
2180 if (sc->sc_flags & SC_OP_TXAGGR)
2181 ath_txq_schedule(sc, txq);
2182 spin_unlock_bh(&txq->axq_lock);
2183 }
2184 }
2185
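/*
 * TX watchdog: if a queue still holds descriptors and no completion has
 * been observed since the previous poll, assume the hardware is hung and
 * reset the chip. Re-arms itself every ATH_TX_COMPLETE_POLL_INT ms.
 */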
2186 static void ath_tx_complete_poll_work(struct work_struct *work)
2187 {
2188 struct ath_softc *sc = container_of(work, struct ath_softc,
2189 tx_complete_work.work);
2190 struct ath_txq *txq;
2191 int i;
2192 bool needreset = false;
2193
2194 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2195 if (ATH_TXQ_SETUP(sc, i)) {
2196 txq = &sc->tx.txq[i];
2197 spin_lock_bh(&txq->axq_lock);
2198 if (txq->axq_depth) {
2199 if (txq->axq_tx_inprogress) {
2200 needreset = true;
2201 spin_unlock_bh(&txq->axq_lock);
2202 break;
2203 } else {
2204 txq->axq_tx_inprogress = true;
2205 }
2206 }
2207 spin_unlock_bh(&txq->axq_lock);
2208 }
2209
2210 if (needreset) {
2211 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2212 "tx hung, resetting the chip\n");
2213 ath9k_ps_wakeup(sc);
2214 ath_reset(sc, false);
2215 ath9k_ps_restore(sc);
2216 }
2217
2218 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2219 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2220 }
2221
2222
2223
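/*
 * TX interrupt bottom half (legacy DMA path): process every hardware
 * queue flagged in the QCU interrupt mask.
 */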
2224 void ath_tx_tasklet(struct ath_softc *sc)
2225 {
2226 int i;
2227 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2228
2229 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2230
2231 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2232 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2233 ath_tx_processq(sc, &sc->tx.txq[i]);
2234 }
2235 }
2236
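/*
 * TX completion handling for EDMA hardware: pop entries from the TX
 * status ring, complete the matching frames from the per-queue TX FIFO,
 * and refill the FIFO from the pending list once space frees up.
 */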
2237 void ath_tx_edma_tasklet(struct ath_softc *sc)
2238 {
2239 struct ath_tx_status txs;
2240 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2241 struct ath_hw *ah = sc->sc_ah;
2242 struct ath_txq *txq;
2243 struct ath_buf *bf, *lastbf;
2244 struct list_head bf_head;
2245 int status;
2246 int txok;
2247
2248 for (;;) {
2249 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2250 if (status == -EINPROGRESS)
2251 break;
2252 if (status == -EIO) {
2253 ath_print(common, ATH_DBG_XMIT,
2254 "Error processing tx status\n");
2255 break;
2256 }
2257
2258 /* Skip beacon completions */
2259 if (txs.qid == sc->beacon.beaconq)
2260 continue;
2261
2262 txq = &sc->tx.txq[txs.qid];
2263
2264 spin_lock_bh(&txq->axq_lock);
2265 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2266 spin_unlock_bh(&txq->axq_lock);
2267 return;
2268 }
2269
2270 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2271 struct ath_buf, list);
2272 lastbf = bf->bf_lastbf;
2273
2274 INIT_LIST_HEAD(&bf_head);
2275 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2276 &lastbf->list);
2277 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2278 txq->axq_depth--;
2279 txq->axq_tx_inprogress = false;
2280 spin_unlock_bh(&txq->axq_lock);
2281
2282 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2283
2284 /*
2285 		 * Make sure the nullfunc frame has been ACKed before configuring
2286 		 * the hw into PS mode.
2287 */
2288 if (bf->bf_isnullfunc && txok) {
2289 			if (sc->ps_flags & PS_ENABLED)
2290 ath9k_enable_ps(sc);
2291 else
2292 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2293 }
2294
2295 if (!bf_isampdu(bf)) {
2296 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2297 bf->bf_state.bf_type |= BUF_XRETRY;
2298 ath_tx_rc_status(bf, &txs, 0, txok, true);
2299 }
2300
2301 if (bf_isampdu(bf))
2302 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2303 else
2304 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2305 &txs, txok, 0);
2306
2307 ath_wake_mac80211_queue(sc, txq);
2308
2309 spin_lock_bh(&txq->axq_lock);
2310 if (!list_empty(&txq->txq_fifo_pending)) {
2311 INIT_LIST_HEAD(&bf_head);
2312 bf = list_first_entry(&txq->txq_fifo_pending,
2313 struct ath_buf, list);
2314 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2315 &bf->bf_lastbf->list);
2316 ath_tx_txqaddbuf(sc, txq, &bf_head);
2317 } else if (sc->sc_flags & SC_OP_TXAGGR)
2318 ath_txq_schedule(sc, txq);
2319 spin_unlock_bh(&txq->axq_lock);
2320 }
2321 }
2322
2323 /*****************/
2324 /* Init, Cleanup */
2325 /*****************/
2326
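/* Allocate the DMA-coherent TX status ring used by EDMA hardware. */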
2327 static int ath_txstatus_setup(struct ath_softc *sc, int size)
2328 {
2329 struct ath_descdma *dd = &sc->txsdma;
2330 u8 txs_len = sc->sc_ah->caps.txs_len;
2331
2332 dd->dd_desc_len = size * txs_len;
2333 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2334 &dd->dd_desc_paddr, GFP_KERNEL);
2335 if (!dd->dd_desc)
2336 return -ENOMEM;
2337
2338 return 0;
2339 }
2340
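/* Set up the TX status ring and hand its address to the hardware. */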
2341 static int ath_tx_edma_init(struct ath_softc *sc)
2342 {
2343 int err;
2344
2345 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2346 if (!err)
2347 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2348 sc->txsdma.dd_desc_paddr,
2349 ATH_TXSTATUS_RING_SIZE);
2350
2351 return err;
2352 }
2353
2354 static void ath_tx_edma_cleanup(struct ath_softc *sc)
2355 {
2356 struct ath_descdma *dd = &sc->txsdma;
2357
2358 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2359 dd->dd_desc_paddr);
2360 }
2361
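/*
 * Allocate the TX and beacon descriptor rings, arm the TX completion
 * watchdog and, on EDMA hardware, set up the TX status ring. Any failure
 * tears down whatever was already allocated via ath_tx_cleanup().
 */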
2362 int ath_tx_init(struct ath_softc *sc, int nbufs)
2363 {
2364 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2365 int error = 0;
2366
2367 spin_lock_init(&sc->tx.txbuflock);
2368
2369 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2370 "tx", nbufs, 1, 1);
2371 if (error != 0) {
2372 ath_print(common, ATH_DBG_FATAL,
2373 "Failed to allocate tx descriptors: %d\n", error);
2374 goto err;
2375 }
2376
2377 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2378 "beacon", ATH_BCBUF, 1, 1);
2379 if (error != 0) {
2380 ath_print(common, ATH_DBG_FATAL,
2381 "Failed to allocate beacon descriptors: %d\n", error);
2382 goto err;
2383 }
2384
2385 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2386
2387 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2388 error = ath_tx_edma_init(sc);
2389 if (error)
2390 goto err;
2391 }
2392
2393 err:
2394 if (error != 0)
2395 ath_tx_cleanup(sc);
2396
2397 return error;
2398 }
2399
2400 void ath_tx_cleanup(struct ath_softc *sc)
2401 {
2402 if (sc->beacon.bdma.dd_desc_len != 0)
2403 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2404
2405 if (sc->tx.txdma.dd_desc_len != 0)
2406 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2407
2408 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2409 ath_tx_edma_cleanup(sc);
2410 }
2411
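/*
 * Initialize per-station aggregation state: one ath_atx_tid per TID
 * (sequence numbers, block-ack window, software queue) and one
 * ath_atx_ac per WME access category mapped to its hardware queue.
 */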
2412 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2413 {
2414 struct ath_atx_tid *tid;
2415 struct ath_atx_ac *ac;
2416 int tidno, acno;
2417
2418 for (tidno = 0, tid = &an->tid[tidno];
2419 tidno < WME_NUM_TID;
2420 tidno++, tid++) {
2421 tid->an = an;
2422 tid->tidno = tidno;
2423 tid->seq_start = tid->seq_next = 0;
2424 tid->baw_size = WME_MAX_BA;
2425 tid->baw_head = tid->baw_tail = 0;
2426 tid->sched = false;
2427 tid->paused = false;
2428 tid->state &= ~AGGR_CLEANUP;
2429 INIT_LIST_HEAD(&tid->buf_q);
2430 acno = TID_TO_WME_AC(tidno);
2431 tid->ac = &an->ac[acno];
2432 tid->state &= ~AGGR_ADDBA_COMPLETE;
2433 tid->state &= ~AGGR_ADDBA_PROGRESS;
2434 }
2435
2436 for (acno = 0, ac = &an->ac[acno];
2437 acno < WME_NUM_AC; acno++, ac++) {
2438 ac->sched = false;
2439 ac->qnum = sc->tx.hwq_map[acno];
2440 INIT_LIST_HEAD(&ac->tid_q);
2441 }
2442 }
2443
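/*
 * Tear down per-station aggregation state: unschedule the TIDs and ACs
 * and drain any frames still sitting in the software queues.
 */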
2444 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2445 {
2446 struct ath_atx_ac *ac;
2447 struct ath_atx_tid *tid;
2448 struct ath_txq *txq;
2449 int i, tidno;
2450
2451 for (tidno = 0, tid = &an->tid[tidno];
2452 tidno < WME_NUM_TID; tidno++, tid++) {
2453 i = tid->ac->qnum;
2454
2455 if (!ATH_TXQ_SETUP(sc, i))
2456 continue;
2457
2458 txq = &sc->tx.txq[i];
2459 ac = tid->ac;
2460
2461 spin_lock_bh(&txq->axq_lock);
2462
2463 if (tid->sched) {
2464 list_del(&tid->list);
2465 tid->sched = false;
2466 }
2467
2468 if (ac->sched) {
2469 list_del(&ac->list);
2470 tid->ac->sched = false;
2471 }
2472
2473 ath_tid_drain(sc, txq, tid);
2474 tid->state &= ~AGGR_ADDBA_COMPLETE;
2475 tid->state &= ~AGGR_CLEANUP;
2476
2477 spin_unlock_bh(&txq->axq_lock);
2478 }
2479 }