ath9k: fix an aggregation start related race condition
[deliverable/linux.git] / drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
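
/*
 * One OFDM symbol lasts 4 us with the long guard interval and 3.6 us with
 * the short GI: SYMBOL_TIME(11) = 44 us, while SYMBOL_TIME_HALFGI(11)
 * evaluates to (11 * 18 + 4) / 5 = 40 us, the "+ 4" rounding 39.6 us up
 * to a whole microsecond.
 */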

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

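/*
 * Largest frame (in bytes) that fits into a 4 ms transmit window at each
 * MCS, indexed as [GI/bandwidth mode][MCS]. Entries saturate at 65532,
 * just under the 16-bit aggregate length limit of the hardware.
 */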
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
		6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
		7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

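/*
 * Block-ack window tracking: tid->tx_buf is a circular bitmap of
 * ATH_TID_MAX_BUFS slots in which tid->seq_start corresponds to
 * tid->baw_head. Completing the frame at the head of the window lets
 * the window slide forward across every already-completed slot.
 */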
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

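/*
 * Clone a tx buffer so the last subframe of an aggregate can be software
 * retried while the original "stale" holding descriptor is still owned by
 * the hardware (see the bf_stale handling in ath_tx_complete_aggr()).
 */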
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 0, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
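
	/*
	 * Triage each subframe of the aggregate: a subframe whose bit is set
	 * in the BA bitmap (or that completed outside an aggregate) counts as
	 * acked; the rest are software-retried until ATH_MAX_SW_RETRIES and
	 * then failed, with a BAR to move the receiver's window forward.
	 */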
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet entirely.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
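
/*
 * Example: if the lowest rate in the series is MCS3 at HT20 with short GI,
 * the 4 ms cap above is ath_max_4ms_framelen[MCS_HT20_SGI][3] = 14296
 * bytes; with BT coex priority detected this is scaled by 3/8 down to
 * 5361 bytes.
 */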

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to
	 * determine the required minimum subframe length. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

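/*
 * Example: with a 4 us MPDU density, one spatial stream and MCS7 at HT20
 * with long GI, nsymbols = NUM_SYMBOLS_PER_USEC(4) = 1 and nsymbits = 260,
 * so subframes shorter than 260 / 8 = 32 bytes get extra delimiters.
 */

/*
 * Pull frames off tid->buf_q into a single aggregate. Collection stops when
 * the next frame would fall outside the block-ack window, when the length
 * limit from ath_lookup_rate() or the subframe limit (half the BAW, capped
 * at ATH_AMPDU_SUBFRAME_DEFAULT) would be exceeded, or when the queue
 * empties.
 */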
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

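	/*
	 * Closing the race around aggregation start: refuse the ADDBA while a
	 * previous session for this TID is still being torn down (AGGR_CLEANUP)
	 * or is already established (AGGR_ADDBA_COMPLETE), so its state is not
	 * re-initialized underneath; the caller may retry later.
	 */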
	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that software
			 * retried frames always stay at the head of the
			 * software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back
	 * up due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
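
/*
 * Example of the readytime computation above: with cabqReadytime clamped
 * to 50 (percent) and a beacon interval of 100, the CAB queue gets
 * qi.tqi_readyTime = 100 * 50 / 100 = 50, i.e. half the beacon period.
 */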

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix     - rate index
 * pktlen  - total bytes (delims + data + fcs + pads + pad delims)
 * width   - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

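/*
 * Example for ath_pkt_duration(): a 1500-byte frame at MCS0 (one stream,
 * 20 MHz, long GI) gives nbits = 1500 * 8 + 22 = 12022 and nsymbits = 26,
 * so nsymbols = 463 and duration = 463 * 4 + 36 = 1888 us including the
 * training and signal fields.
 */
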
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}
1688
1689/* FIXME: tx power */
1690static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
528f0c6b
S
1691 struct ath_tx_control *txctl)
1692{
a22be22a 1693 struct sk_buff *skb = bf->bf_mpdu;
528f0c6b 1694 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c37452b0 1695 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528f0c6b
S
1696 struct ath_node *an = NULL;
1697 struct list_head bf_head;
1698 struct ath_desc *ds;
1699 struct ath_atx_tid *tid;
cbe61d8a 1700 struct ath_hw *ah = sc->sc_ah;
528f0c6b 1701 int frm_type;
c37452b0 1702 __le16 fc;
528f0c6b 1703
528f0c6b 1704 frm_type = get_hw_packet_type(skb);
c37452b0 1705 fc = hdr->frame_control;
528f0c6b
S
1706
1707 INIT_LIST_HEAD(&bf_head);
1708 list_add_tail(&bf->list, &bf_head);
f078f209 1709
f078f209 1710 ds = bf->bf_desc;
87d5efbb 1711 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1712
528f0c6b
S
1713 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1714 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1715
1716 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1717 skb->len, /* segment length */
1718 true, /* first segment */
1719 true, /* last segment */
3f3a1c80 1720 ds, /* first descriptor */
cc610ac0
VT
1721 bf->bf_buf_addr,
1722 txctl->txq->axq_qnum);
f078f209 1723
9f42c2b6
FF
1724 if (bf->bf_state.bfs_paprd)
1725 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1726
528f0c6b 1727 spin_lock_bh(&txctl->txq->axq_lock);
f078f209 1728
f1617967
JL
1729 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1730 tx_info->control.sta) {
1731 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1732 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1733
c37452b0
S
1734 if (!ieee80211_is_data_qos(fc)) {
1735 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1736 goto tx_done;
1737 }
1738
4fdec031 1739 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
f078f209
LR
1740 /*
1741 * Try aggregation if it's a unicast data frame
1742 * and the destination is HT capable.
1743 */
528f0c6b 1744 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
f078f209
LR
1745 } else {
1746 /*
528f0c6b
S
 1747                  * Send this frame as a regular frame when the ADDBA
 1748                  * exchange is neither complete nor pending.
f078f209 1749 */
c37452b0
S
1750 ath_tx_send_ht_normal(sc, txctl->txq,
1751 tid, &bf_head);
f078f209
LR
1752 }
1753 } else {
c37452b0 1754 ath_tx_send_normal(sc, txctl->txq, &bf_head);
f078f209 1755 }
528f0c6b 1756
c37452b0 1757tx_done:
528f0c6b 1758 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1759}
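
/*
 * Dispatch-policy sketch (illustrative only): a frame takes the A-MPDU
 * path only when the buffer is HT, aggregation is enabled, the station
 * is known, the frame is QoS data, and mac80211 flagged it with
 * IEEE80211_TX_CTL_AMPDU; everything else goes out as a normal frame.
 * The enum and function below are hypothetical.
 */
#include <stdbool.h>

enum example_tx_path {
	EXAMPLE_TX_NORMAL,
	EXAMPLE_TX_HT_NORMAL,
	EXAMPLE_TX_AMPDU,
};

static enum example_tx_path
example_classify(bool is_ht, bool aggr_enabled, bool has_sta,
		 bool is_qos_data, bool ampdu_flagged)
{
	if (!is_ht || !aggr_enabled || !has_sta || !is_qos_data)
		return EXAMPLE_TX_NORMAL;
	return ampdu_flagged ? EXAMPLE_TX_AMPDU : EXAMPLE_TX_HT_NORMAL;
}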
1760
f8316df1 1761/* Upon failure caller should free skb */
c52f33d0 1762int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1763 struct ath_tx_control *txctl)
f078f209 1764{
c52f33d0
JM
1765 struct ath_wiphy *aphy = hw->priv;
1766 struct ath_softc *sc = aphy->sc;
c46917bb 1767 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
84642d6b 1768 struct ath_txq *txq = txctl->txq;
528f0c6b 1769 struct ath_buf *bf;
97923b14 1770 int q, r;
f078f209 1771
528f0c6b
S
1772 bf = ath_tx_get_buffer(sc);
1773 if (!bf) {
c46917bb 1774 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
528f0c6b
S
1775 return -1;
1776 }
1777
c52f33d0 1778 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
f8316df1 1779 if (unlikely(r)) {
c46917bb 1780 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
c112d0c5
LR
1781
 1782         /* upon ath_tx_processq() this TX queue will be resumed; we
 1783          * guarantee this will happen by knowing beforehand that
 1784          * we will at least have to run TX completion on one buffer
 1785          * on the queue */
1786 spin_lock_bh(&txq->axq_lock);
84642d6b 1787 if (!txq->stopped && txq->axq_depth > 1) {
f52de03b 1788 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
c112d0c5
LR
1789 txq->stopped = 1;
1790 }
1791 spin_unlock_bh(&txq->axq_lock);
1792
0a8cea84 1793 ath_tx_return_buffer(sc, bf);
c112d0c5 1794
f8316df1
LR
1795 return r;
1796 }
1797
97923b14
FF
1798 q = skb_get_queue_mapping(skb);
1799 if (q >= 4)
1800 q = 0;
1801
1802 spin_lock_bh(&txq->axq_lock);
1803 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1804 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1805 txq->stopped = 1;
1806 }
1807 spin_unlock_bh(&txq->axq_lock);
1808
8f93b8b3 1809 ath_tx_start_dma(sc, bf, txctl);
f078f209 1810
528f0c6b 1811 return 0;
f078f209
LR
1812}
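
/*
 * Backpressure sketch (illustrative): ath_tx_start() counts frames in
 * flight per software queue and stops the mac80211 queue once the count
 * crosses ATH_MAX_QDEPTH; the completion path (ath_wake_mac80211_queue())
 * decrements and restarts it below the threshold. User-space model with
 * hypothetical names:
 */
#include <stdbool.h>

#define EXAMPLE_MAX_QDEPTH 128	/* stand-in for ATH_MAX_QDEPTH */

struct example_queue {
	int pending;
	bool stopped;
};

static void example_tx_enqueue(struct example_queue *q)
{
	if (++q->pending > EXAMPLE_MAX_QDEPTH && !q->stopped)
		q->stopped = true;	/* ath_mac80211_stop_queue() here */
}

static void example_tx_complete(struct example_queue *q)
{
	if (--q->pending < 0)
		q->pending = 0;
	if (q->stopped && q->pending < EXAMPLE_MAX_QDEPTH)
		q->stopped = false;	/* ath_mac80211_start_queue() here */
}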
1813
c52f33d0 1814void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
f078f209 1815{
c52f33d0
JM
1816 struct ath_wiphy *aphy = hw->priv;
1817 struct ath_softc *sc = aphy->sc;
c46917bb 1818 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3
BP
1819 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1820 int padpos, padsize;
e8324357
S
1821 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1822 struct ath_tx_control txctl;
f078f209 1823
e8324357 1824 memset(&txctl, 0, sizeof(struct ath_tx_control));
f078f209
LR
1825
1826 /*
e8324357
S
1827 * As a temporary workaround, assign seq# here; this will likely need
1828 * to be cleaned up to work better with Beacon transmission and virtual
1829 * BSSes.
f078f209 1830 */
e8324357 1831 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1832 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1833 sc->tx.seq_no += 0x10;
1834 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1835 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1836 }
f078f209 1837
e8324357 1838 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1839 padpos = ath9k_cmn_padpos(hdr->frame_control);
1840 padsize = padpos & 3;
 1841         if (padsize && skb->len > padpos) {
e8324357 1842 if (skb_headroom(skb) < padsize) {
c46917bb
LR
1843 ath_print(common, ATH_DBG_XMIT,
1844 "TX CABQ padding failed\n");
e8324357
S
1845 dev_kfree_skb_any(skb);
1846 return;
1847 }
1848 skb_push(skb, padsize);
4d91f9f3 1849 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1850 }
f078f209 1851
e8324357 1852 txctl.txq = sc->beacon.cabq;
f078f209 1853
c46917bb
LR
1854 ath_print(common, ATH_DBG_XMIT,
1855 "transmitting CABQ packet, skb: %p\n", skb);
f078f209 1856
c52f33d0 1857 if (ath_tx_start(hw, skb, &txctl) != 0) {
c46917bb 1858 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
e8324357 1859 goto exit;
f078f209 1860 }
f078f209 1861
e8324357
S
1862 return;
1863exit:
1864 dev_kfree_skb_any(skb);
f078f209
LR
1865}
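
/*
 * Padding sketch (illustrative): the skb_push()/memmove() pair above
 * grows the buffer at the front and slides only the first padpos header
 * bytes down, opening a padsize-byte gap between header and body;
 * ath_tx_complete() below performs the mirror move before handing the
 * frame back to mac80211. Plain user-space model:
 */
#include <string.h>

/* hdr_start must have at least padsize bytes of headroom */
static unsigned char *example_pad_header(unsigned char *hdr_start,
					 size_t padpos, size_t padsize)
{
	unsigned char *new_start = hdr_start - padsize;	/* skb_push() */

	memmove(new_start, hdr_start, padpos);	/* shift the header only */
	return new_start;
}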
1866
e8324357
S
1867/*****************/
1868/* TX Completion */
1869/*****************/
528f0c6b 1870
e8324357 1871static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
827e69bf 1872 struct ath_wiphy *aphy, int tx_flags)
528f0c6b 1873{
e8324357
S
1874 struct ieee80211_hw *hw = sc->hw;
1875 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1876 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1877         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1878 int q, padpos, padsize;
528f0c6b 1879
c46917bb 1880 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1881
827e69bf
FF
1882 if (aphy)
1883 hw = aphy->hw;
528f0c6b 1884
6b2c4032 1885 if (tx_flags & ATH_TX_BAR)
e8324357 1886 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1887
6b2c4032 1888 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1889 /* Frame was ACKed */
1890 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1891 }
1892
4d91f9f3
BP
1893 padpos = ath9k_cmn_padpos(hdr->frame_control);
1894 padsize = padpos & 3;
 1895         if (padsize && skb->len > padpos + padsize) {
e8324357
S
1896 /*
1897 * Remove MAC header padding before giving the frame back to
1898 * mac80211.
1899 */
4d91f9f3 1900 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1901 skb_pull(skb, padsize);
1902 }
528f0c6b 1903
1b04b930
S
1904 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1905 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
c46917bb
LR
1906 ath_print(common, ATH_DBG_PS,
1907 "Going back to sleep after having "
f643e51d 1908 "received TX status (0x%lx)\n",
1b04b930
S
1909 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1910 PS_WAIT_FOR_CAB |
1911 PS_WAIT_FOR_PSPOLL_DATA |
1912 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1913 }
1914
827e69bf 1915 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
f0ed85c6 1916 ath9k_tx_status(hw, skb);
97923b14
FF
1917 else {
1918 q = skb_get_queue_mapping(skb);
1919 if (q >= 4)
1920 q = 0;
1921
1922 if (--sc->tx.pending_frames[q] < 0)
1923 sc->tx.pending_frames[q] = 0;
1924
827e69bf 1925 ieee80211_tx_status(hw, skb);
97923b14 1926 }
e8324357 1927}
f078f209 1928
e8324357 1929static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1930 struct ath_txq *txq, struct list_head *bf_q,
1931 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1932{
e8324357 1933 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1934 unsigned long flags;
6b2c4032 1935 int tx_flags = 0;
f078f209 1936
e8324357 1937 if (sendbar)
6b2c4032 1938 tx_flags = ATH_TX_BAR;
f078f209 1939
e8324357 1940 if (!txok) {
6b2c4032 1941 tx_flags |= ATH_TX_ERROR;
f078f209 1942
e8324357 1943 if (bf_isxretried(bf))
6b2c4032 1944 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1945 }
1946
e8324357 1947 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
9f42c2b6
FF
1948
1949 if (bf->bf_state.bfs_paprd) {
ca369eb4
VT
1950 if (time_after(jiffies,
1951 bf->bf_state.bfs_paprd_timestamp +
78a18172 1952 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 1953 dev_kfree_skb_any(skb);
78a18172 1954 else
ca369eb4 1955 complete(&sc->paprd_complete);
9f42c2b6
FF
1956 } else {
1957 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1958 ath_debug_stat_tx(sc, txq, bf, ts);
1959 }
e8324357
S
1960
1961 /*
 1962          * Return the list of ath_buf of this mpdu to the free queue
1963 */
1964 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1965 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1966 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1967}
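
/*
 * PAPRD completion sketch (illustrative): a PAPRD training frame is only
 * useful if its TX status arrives before the calibration code gives up
 * waiting; a completion that comes back after the deadline must free the
 * frame itself rather than signal a waiter that is long gone. Simplified
 * model using wall-clock time instead of jiffies; names are hypothetical.
 */
#include <stdbool.h>
#include <time.h>

struct example_training_frame {
	time_t sent_at;
};

/* true: signal the waiting calibration code; false: expired, just drop */
static bool example_training_still_wanted(const struct example_training_frame *f,
					  double timeout_sec)
{
	return difftime(time(NULL), f->sent_at) <= timeout_sec;
}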
1968
e8324357 1969static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 1970 struct ath_tx_status *ts, int txok)
f078f209 1971{
e8324357
S
1972 u16 seq_st = 0;
1973 u32 ba[WME_BA_BMP_SIZE >> 5];
1974 int ba_index;
1975 int nbad = 0;
1976 int isaggr = 0;
f078f209 1977
7c9fd60f 1978 if (bf->bf_lastbf->bf_tx_aborted)
e8324357 1979 return 0;
f078f209 1980
e8324357
S
1981 isaggr = bf_isaggr(bf);
1982 if (isaggr) {
db1a052b
FF
1983 seq_st = ts->ts_seqnum;
1984 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 1985 }
f078f209 1986
e8324357
S
1987 while (bf) {
1988 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1989 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1990 nbad++;
1991
1992 bf = bf->bf_next;
1993 }
f078f209 1994
e8324357
S
1995 return nbad;
1996}
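
/*
 * Block-ack bookkeeping sketch (illustrative): ATH_BA_INDEX() is the
 * distance of a frame's sequence number from the BA window start, modulo
 * the 12-bit sequence space, and ATH_BA_ISSET() tests that position in
 * the 64-bit bitmap copied out of the TX status above. User-space model:
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SEQ_MAX		4096	/* 802.11 sequence space: 12 bits */
#define EXAMPLE_BA_BMP_SIZE	64

static int example_ba_index(uint16_t seq_st, uint16_t seqno)
{
	return (seqno - seq_st) & (EXAMPLE_SEQ_MAX - 1);
}

static bool example_ba_isset(const uint32_t ba[2], int idx)
{
	return idx < EXAMPLE_BA_BMP_SIZE &&
	       (ba[idx >> 5] & (1u << (idx & 31)));
}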
f078f209 1997
db1a052b 1998static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
8a92e2ee 1999 int nbad, int txok, bool update_rc)
f078f209 2000{
a22be22a 2001 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2002 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2003 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
827e69bf 2004 struct ieee80211_hw *hw = bf->aphy->hw;
8a92e2ee 2005 u8 i, tx_rateindex;
f078f209 2006
95e4acb7 2007 if (txok)
db1a052b 2008 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2009
db1a052b 2010 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2011 WARN_ON(tx_rateindex >= hw->max_rates);
2012
db1a052b 2013 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 2014 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
d969847c
FF
2015 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2016 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2017
db1a052b 2018 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 2019 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
254ad0ff 2020 if (ieee80211_is_data(hdr->frame_control)) {
db1a052b 2021 if (ts->ts_flags &
827e69bf
FF
2022 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2023 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
db1a052b
FF
2024 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2025 (ts->ts_status & ATH9K_TXERR_FIFO))
827e69bf
FF
2026 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2027 tx_info->status.ampdu_len = bf->bf_nframes;
2028 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
e8324357 2029 }
f078f209 2030 }
8a92e2ee 2031
545750d3 2032 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2033 tx_info->status.rates[i].count = 0;
545750d3
FF
2034 tx_info->status.rates[i].idx = -1;
2035 }
8a92e2ee 2036
78c4653a 2037 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2038}
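
/*
 * Rate-status sketch (illustrative): for an aggregate, mac80211's rate
 * control wants the number of MPDUs attempted (ampdu_len) and the number
 * the block-ack covered (ampdu_ack_len); the driver derives the latter by
 * subtracting the bad-frame count computed above. Minimal model:
 */
struct example_ampdu_status {
	int ampdu_len;		/* MPDUs in the aggregate */
	int ampdu_ack_len;	/* MPDUs acknowledged by the BA */
};

static struct example_ampdu_status example_fill_status(int nframes, int nbad)
{
	struct example_ampdu_status st = {
		.ampdu_len	= nframes,
		.ampdu_ack_len	= nframes - nbad,
	};

	return st;
}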
2039
059d806c
S
2040static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2041{
2042 int qnum;
2043
97923b14
FF
2044 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2045 if (qnum == -1)
2046 return;
2047
059d806c 2048 spin_lock_bh(&txq->axq_lock);
97923b14 2049 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
68e8f2fa
VT
2050 if (ath_mac80211_start_queue(sc, qnum))
2051 txq->stopped = 0;
059d806c
S
2052 }
2053 spin_unlock_bh(&txq->axq_lock);
2054}
2055
e8324357 2056static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2057{
cbe61d8a 2058 struct ath_hw *ah = sc->sc_ah;
c46917bb 2059 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2060 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2061 struct list_head bf_head;
e8324357 2062 struct ath_desc *ds;
29bffa96 2063 struct ath_tx_status ts;
0934af23 2064 int txok;
e8324357 2065 int status;
f078f209 2066
c46917bb
LR
2067 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2068 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2069 txq->axq_link);
f078f209 2070
f078f209
LR
2071 for (;;) {
2072 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2073 if (list_empty(&txq->axq_q)) {
2074 txq->axq_link = NULL;
f078f209
LR
2075 spin_unlock_bh(&txq->axq_lock);
2076 break;
2077 }
f078f209
LR
2078 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2079
e8324357
S
2080 /*
 2081                  * There is a race condition where a BH gets scheduled
 2082                  * after sw writes TxE and before hw re-loads the last
 2083                  * descriptor to get the newly chained one.
2084 * Software must keep the last DONE descriptor as a
2085 * holding descriptor - software does so by marking
2086 * it with the STALE flag.
2087 */
2088 bf_held = NULL;
a119cc49 2089 if (bf->bf_stale) {
e8324357
S
2090 bf_held = bf;
2091 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2092 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2093 break;
2094 } else {
2095 bf = list_entry(bf_held->list.next,
6ef9b13d 2096 struct ath_buf, list);
e8324357 2097 }
f078f209
LR
2098 }
2099
2100 lastbf = bf->bf_lastbf;
e8324357 2101 ds = lastbf->bf_desc;
f078f209 2102
29bffa96
FF
2103 memset(&ts, 0, sizeof(ts));
2104 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2105 if (status == -EINPROGRESS) {
f078f209 2106 spin_unlock_bh(&txq->axq_lock);
e8324357 2107 break;
f078f209 2108 }
f078f209 2109
e7824a50
LR
2110 /*
2111 * We now know the nullfunc frame has been ACKed so we
2112 * can disable RX.
2113 */
2114 if (bf->bf_isnullfunc &&
29bffa96 2115 (ts.ts_status & ATH9K_TX_ACKED)) {
3f7c5c10
SB
2116 if ((sc->ps_flags & PS_ENABLED))
2117 ath9k_enable_ps(sc);
2118 else
1b04b930 2119 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
e7824a50
LR
2120 }
2121
e8324357
S
2122 /*
 2123                  * Remove the ath_bufs of the same transmit unit from txq,
 2124                  * but leave the last descriptor back as the holding
2125 * descriptor for hw.
2126 */
a119cc49 2127 lastbf->bf_stale = true;
e8324357 2128 INIT_LIST_HEAD(&bf_head);
e8324357
S
2129 if (!list_is_singular(&lastbf->list))
2130 list_cut_position(&bf_head,
2131 &txq->axq_q, lastbf->list.prev);
f078f209 2132
e8324357 2133 txq->axq_depth--;
29bffa96 2134 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2135 txq->axq_tx_inprogress = false;
0a8cea84
FF
2136 if (bf_held)
2137 list_del(&bf_held->list);
e8324357 2138 spin_unlock_bh(&txq->axq_lock);
f078f209 2139
0a8cea84
FF
2140 if (bf_held)
2141 ath_tx_return_buffer(sc, bf_held);
f078f209 2142
e8324357
S
2143 if (!bf_isampdu(bf)) {
2144 /*
2145 * This frame is sent out as a single frame.
2146 * Use hardware retry status for this frame.
2147 */
29bffa96 2148 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2149 bf->bf_state.bf_type |= BUF_XRETRY;
29bffa96 2150 ath_tx_rc_status(bf, &ts, 0, txok, true);
e8324357 2151 }
f078f209 2152
e8324357 2153 if (bf_isampdu(bf))
29bffa96 2154 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
e8324357 2155 else
29bffa96 2156 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2157
059d806c 2158 ath_wake_mac80211_queue(sc, txq);
8469cdef 2159
059d806c 2160 spin_lock_bh(&txq->axq_lock);
e8324357
S
2161 if (sc->sc_flags & SC_OP_TXAGGR)
2162 ath_txq_schedule(sc, txq);
2163 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2164 }
2165}
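
/*
 * Holding-descriptor sketch (illustrative): because the DMA engine may
 * still re-read the last descriptor it fetched, completion leaves that
 * descriptor on the queue marked stale and only recycles it on a later
 * pass, once a newer frame has completed behind it. Simplified model of
 * the head-of-queue handling; types are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct example_desc {
	bool stale;			/* kept back as the holder */
	struct example_desc *next;
};

/*
 * Return the first descriptor carrying a real frame, or NULL when only
 * the holding descriptor remains and there is nothing to reap yet.
 */
static struct example_desc *example_reap_head(struct example_desc *head)
{
	if (head && head->stale)
		return head->next;	/* skip the holding descriptor */
	return head;
}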
2166
305fe47f 2167static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2168{
2169 struct ath_softc *sc = container_of(work, struct ath_softc,
2170 tx_complete_work.work);
2171 struct ath_txq *txq;
2172 int i;
2173 bool needreset = false;
2174
2175 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2176 if (ATH_TXQ_SETUP(sc, i)) {
2177 txq = &sc->tx.txq[i];
2178 spin_lock_bh(&txq->axq_lock);
2179 if (txq->axq_depth) {
2180 if (txq->axq_tx_inprogress) {
2181 needreset = true;
2182 spin_unlock_bh(&txq->axq_lock);
2183 break;
2184 } else {
2185 txq->axq_tx_inprogress = true;
2186 }
2187 }
2188 spin_unlock_bh(&txq->axq_lock);
2189 }
2190
2191 if (needreset) {
c46917bb
LR
2192 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2193 "tx hung, resetting the chip\n");
332c5566 2194 ath9k_ps_wakeup(sc);
164ace38 2195 ath_reset(sc, false);
332c5566 2196 ath9k_ps_restore(sc);
164ace38
SB
2197 }
2198
42935eca 2199 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2200 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2201}
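
/*
 * Watchdog sketch (illustrative): the poll work is a two-strike progress
 * check. Each pass arms a per-queue flag; any completion clears it. If
 * the flag is still armed on the next pass while frames remain queued,
 * nothing completed for a full interval and the chip gets reset.
 * Hypothetical user-space model:
 */
#include <stdbool.h>

struct example_txq_state {
	int depth;		/* frames pending on hardware */
	bool in_progress;	/* armed by poll, cleared on completion */
};

/* Returns true when the queue made no progress for one full interval. */
static bool example_poll_once(struct example_txq_state *q)
{
	if (q->depth == 0)
		return false;
	if (q->in_progress)
		return true;		/* second strike: hang detected */
	q->in_progress = true;		/* first strike: arm the check */
	return false;
}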
2202
2203
f078f209 2204
e8324357 2205void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2206{
e8324357
S
2207 int i;
2208 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2209
e8324357 2210 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2211
e8324357
S
2212 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2213 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2214 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2215 }
2216}
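
/*
 * Interrupt fan-out sketch (illustrative): the tasklet asks the hardware
 * which QCUs raised a completion interrupt and services only those
 * queues. Minimal model of the mask walk; the callback is hypothetical.
 */
#include <stdint.h>

static void example_service_queues(uint32_t qcumask, void (*service)(int))
{
	int i;

	for (i = 0; i < 32; i++)
		if (qcumask & (1u << i))
			service(i);
}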
2217
e5003249
VT
2218void ath_tx_edma_tasklet(struct ath_softc *sc)
2219{
2220 struct ath_tx_status txs;
2221 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2222 struct ath_hw *ah = sc->sc_ah;
2223 struct ath_txq *txq;
2224 struct ath_buf *bf, *lastbf;
2225 struct list_head bf_head;
2226 int status;
2227 int txok;
2228
2229 for (;;) {
2230 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2231 if (status == -EINPROGRESS)
2232 break;
2233 if (status == -EIO) {
2234 ath_print(common, ATH_DBG_XMIT,
2235 "Error processing tx status\n");
2236 break;
2237 }
2238
2239 /* Skip beacon completions */
2240 if (txs.qid == sc->beacon.beaconq)
2241 continue;
2242
2243 txq = &sc->tx.txq[txs.qid];
2244
2245 spin_lock_bh(&txq->axq_lock);
2246 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2247 spin_unlock_bh(&txq->axq_lock);
2248 return;
2249 }
2250
2251 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2252 struct ath_buf, list);
2253 lastbf = bf->bf_lastbf;
2254
2255 INIT_LIST_HEAD(&bf_head);
2256 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2257 &lastbf->list);
2258 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2259 txq->axq_depth--;
2260 txq->axq_tx_inprogress = false;
2261 spin_unlock_bh(&txq->axq_lock);
2262
2263 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2264
de0f648d
VT
2265 /*
 2266                  * Make sure the nullfunc frame is ACKed before configuring
 2267                  * the hw into PS mode.
2268 */
2269 if (bf->bf_isnullfunc && txok) {
2270 if ((sc->ps_flags & PS_ENABLED))
2271 ath9k_enable_ps(sc);
2272 else
2273 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2274 }
2275
e5003249 2276 if (!bf_isampdu(bf)) {
e5003249
VT
2277 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2278 bf->bf_state.bf_type |= BUF_XRETRY;
2279 ath_tx_rc_status(bf, &txs, 0, txok, true);
2280 }
2281
2282 if (bf_isampdu(bf))
2283 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2284 else
2285 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2286 &txs, txok, 0);
2287
7f9f3600
FF
2288 ath_wake_mac80211_queue(sc, txq);
2289
e5003249
VT
2290 spin_lock_bh(&txq->axq_lock);
2291 if (!list_empty(&txq->txq_fifo_pending)) {
2292 INIT_LIST_HEAD(&bf_head);
2293 bf = list_first_entry(&txq->txq_fifo_pending,
2294 struct ath_buf, list);
2295 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2296 &bf->bf_lastbf->list);
2297 ath_tx_txqaddbuf(sc, txq, &bf_head);
2298 } else if (sc->sc_flags & SC_OP_TXAGGR)
2299 ath_txq_schedule(sc, txq);
2300 spin_unlock_bh(&txq->axq_lock);
2301 }
2302}
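
/*
 * EDMA FIFO sketch (illustrative): AR9003-family completion pops frames
 * from a small per-queue FIFO ring, so INCR() above just advances the
 * tail index modulo the FIFO depth. The helper below models that ring
 * arithmetic; the name is hypothetical.
 */
static int example_fifo_advance(int idx, int depth)
{
	return (idx + 1) % depth;	/* what INCR(idx, depth) does */
}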
2303
e8324357
S
2304/*****************/
2305/* Init, Cleanup */
2306/*****************/
f078f209 2307
5088c2f1
VT
2308static int ath_txstatus_setup(struct ath_softc *sc, int size)
2309{
2310 struct ath_descdma *dd = &sc->txsdma;
2311 u8 txs_len = sc->sc_ah->caps.txs_len;
2312
2313 dd->dd_desc_len = size * txs_len;
2314 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2315 &dd->dd_desc_paddr, GFP_KERNEL);
2316 if (!dd->dd_desc)
2317 return -ENOMEM;
2318
2319 return 0;
2320}
2321
2322static int ath_tx_edma_init(struct ath_softc *sc)
2323{
2324 int err;
2325
2326 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2327 if (!err)
2328 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2329 sc->txsdma.dd_desc_paddr,
2330 ATH_TXSTATUS_RING_SIZE);
2331
2332 return err;
2333}
2334
2335static void ath_tx_edma_cleanup(struct ath_softc *sc)
2336{
2337 struct ath_descdma *dd = &sc->txsdma;
2338
2339 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2340 dd->dd_desc_paddr);
2341}
2342
e8324357 2343int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2344{
c46917bb 2345 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2346 int error = 0;
f078f209 2347
797fe5cb 2348 spin_lock_init(&sc->tx.txbuflock);
f078f209 2349
797fe5cb 2350 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2351 "tx", nbufs, 1, 1);
797fe5cb 2352 if (error != 0) {
c46917bb
LR
2353 ath_print(common, ATH_DBG_FATAL,
2354 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2355 goto err;
2356 }
f078f209 2357
797fe5cb 2358 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2359 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2360 if (error != 0) {
c46917bb
LR
2361 ath_print(common, ATH_DBG_FATAL,
2362 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2363 goto err;
2364 }
f078f209 2365
164ace38
SB
2366 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2367
5088c2f1
VT
2368 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2369 error = ath_tx_edma_init(sc);
2370 if (error)
2371 goto err;
2372 }
2373
797fe5cb 2374err:
e8324357
S
2375 if (error != 0)
2376 ath_tx_cleanup(sc);
f078f209 2377
e8324357 2378 return error;
f078f209
LR
2379}
2380
797fe5cb 2381void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2382{
2383 if (sc->beacon.bdma.dd_desc_len != 0)
2384 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2385
2386 if (sc->tx.txdma.dd_desc_len != 0)
2387 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2388
2389 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2390 ath_tx_edma_cleanup(sc);
e8324357 2391}
f078f209
LR
2392
2393void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2394{
c5170163
S
2395 struct ath_atx_tid *tid;
2396 struct ath_atx_ac *ac;
2397 int tidno, acno;
f078f209 2398
8ee5afbc 2399 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2400 tidno < WME_NUM_TID;
2401 tidno++, tid++) {
2402 tid->an = an;
2403 tid->tidno = tidno;
2404 tid->seq_start = tid->seq_next = 0;
2405 tid->baw_size = WME_MAX_BA;
2406 tid->baw_head = tid->baw_tail = 0;
2407 tid->sched = false;
e8324357 2408 tid->paused = false;
a37c2c79 2409 tid->state &= ~AGGR_CLEANUP;
c5170163 2410 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2411 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2412 tid->ac = &an->ac[acno];
a37c2c79
S
2413 tid->state &= ~AGGR_ADDBA_COMPLETE;
2414 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2415 }
f078f209 2416
8ee5afbc 2417 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2418 acno < WME_NUM_AC; acno++, ac++) {
2419 ac->sched = false;
1d2231e2 2420 ac->qnum = sc->tx.hwq_map[acno];
c5170163 2421 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2422 }
2423}
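
/*
 * TID/AC layout sketch (illustrative): each node tracks its TIDs
 * individually, but TID_TO_WME_AC() folds them onto the four WME access
 * categories. The model below assumes the usual 802.1D priority mapping
 * and mirrors TIDs 8-15 onto 0-7, which is a simplification; names are
 * hypothetical.
 */
enum example_ac { EXAMPLE_AC_BE, EXAMPLE_AC_BK, EXAMPLE_AC_VI, EXAMPLE_AC_VO };

static enum example_ac example_tid_to_ac(int tidno)
{
	switch (tidno & 7) {
	case 1:
	case 2:
		return EXAMPLE_AC_BK;
	case 0:
	case 3:
		return EXAMPLE_AC_BE;
	case 4:
	case 5:
		return EXAMPLE_AC_VI;
	default:	/* 6, 7 */
		return EXAMPLE_AC_VO;
	}
}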
2424
b5aa9bf9 2425void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2426{
2b40994c
FF
2427 struct ath_atx_ac *ac;
2428 struct ath_atx_tid *tid;
f078f209 2429 struct ath_txq *txq;
2b40994c 2430 int i, tidno;
e8324357 2431
2b40994c
FF
2432 for (tidno = 0, tid = &an->tid[tidno];
2433 tidno < WME_NUM_TID; tidno++, tid++) {
2434 i = tid->ac->qnum;
f078f209 2435
2b40994c
FF
2436 if (!ATH_TXQ_SETUP(sc, i))
2437 continue;
f078f209 2438
2b40994c
FF
2439 txq = &sc->tx.txq[i];
2440 ac = tid->ac;
f078f209 2441
2b40994c
FF
2442 spin_lock_bh(&txq->axq_lock);
2443
2444 if (tid->sched) {
2445 list_del(&tid->list);
2446 tid->sched = false;
2447 }
2448
2449 if (ac->sched) {
2450 list_del(&ac->list);
2451 tid->ac->sched = false;
f078f209 2452 }
2b40994c
FF
2453
2454 ath_tid_drain(sc, txq, tid);
2455 tid->state &= ~AGGR_ADDBA_COMPLETE;
2456 tid->state &= ~AGGR_CLEANUP;
2457
2458 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2459 }
2460}