ath9k: handle tx underrun in the driver instead of rate control
drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

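/*
 * Illustrative note (not in the original file): these macros express the
 * 802.11n symbol math in integer arithmetic. With the regular 4 us symbol,
 * SYMBOL_TIME(10) = 40 us and NUM_SYMBOLS_PER_USEC(8) = 2; with the 3.6 us
 * short-GI symbol, SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us and
 * NUM_SYMBOLS_PER_USEC_HALFGI(8) = (8 * 5 - 4) / 18 = 2. The "+4"/"-4"
 * terms bias the integer divisions so the two directions round consistently.
 */
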
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
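
/*
 * Illustrative note (not in the original file): each entry is roughly the
 * number of bytes a given MCS index can move in 4 ms, i.e. rate * 4 ms / 8.
 * For MCS 0 (HT20, long GI, 6.5 Mbit/s) that bound is about 3250 bytes; the
 * table stores the slightly more conservative 3212. Entries saturate at
 * 65532 because the hardware aggregate length fields are 16 bits wide.
 */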

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
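
/*
 * Example of how the window slides (illustrative, not from the original
 * file): with seq_start = 100 and frames 100..102 outstanding, completing
 * seqno 102 only clears its slot; seq_start stays at 100 because the oldest
 * frame is still pending. Once 100 completes, the loop above advances
 * seq_start (and baw_head) past every contiguous completed slot.
 */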

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
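
/*
 * Example (illustrative, not from the original file): with seq_start = 100
 * and an empty window, adding seqno 100 sets the bit at baw_head and moves
 * baw_tail one slot past it; adding seqno 104 next sets the bit four slots
 * ahead and extends baw_tail to just past that slot, leaving the slots for
 * 101..103 unset until those frames are added.
 */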

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
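
/*
 * Worked example (illustrative, not from the original file): if the lowest
 * entry over the rate series is HT20 MCS 7 (long GI), max_4ms_framelen is
 * 32172 bytes. Without BT coex that is used directly as aggr_limit; with BT
 * priority detected the limit is scaled to 3/8 of it, i.e. 32172 * 3 / 8 =
 * 12064 bytes, before being clamped by ATH_AMPDU_LIMIT_MAX and the peer's
 * advertised maxampdu.
 */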

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
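
/*
 * Worked example (illustrative, assuming ATH_AGGR_DELIM_SZ is the 4-byte
 * MPDU delimiter): with mpdudensity = 8 us and a first rate of HT20 MCS 7
 * (long GI, one stream), nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and
 * nsymbits = bits_per_symbol[7][0] * 1 = 260, so minlen = 2 * 260 / 8 = 65
 * bytes. A 40-byte subframe then needs mindelim = (65 - 40) / 4 = 6 extra
 * delimiters.
 */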

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
				!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
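
/*
 * Note (illustrative, not from the original file): PADBYTES() rounds a
 * subframe up to the 4-byte alignment the aggregate requires, e.g.
 * PADBYTES(1501) = 3 and PADBYTES(1500) = 0; bpad therefore carries both
 * that padding and the delimiter bytes (ndelim << 2) into the next
 * iteration's length check.
 */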

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
S
1226static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1227 struct list_head *head)
f078f209 1228{
cbe61d8a 1229 struct ath_hw *ah = sc->sc_ah;
c46917bb 1230 struct ath_common *common = ath9k_hw_common(ah);
e8324357 1231 struct ath_buf *bf;
f078f209 1232
e8324357
S
1233 /*
1234 * Insert the frame on the outbound list and
1235 * pass it on to the hardware.
1236 */
f078f209 1237
e8324357
S
1238 if (list_empty(head))
1239 return;
f078f209 1240
e8324357 1241 bf = list_first_entry(head, struct ath_buf, list);
f078f209 1242
c46917bb
LR
1243 ath_print(common, ATH_DBG_QUEUE,
1244 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
f078f209 1245
e5003249
VT
1246 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1247 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1248 list_splice_tail_init(head, &txq->txq_fifo_pending);
1249 return;
1250 }
1251 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1252 ath_print(common, ATH_DBG_XMIT,
1253 "Initializing tx fifo %d which "
1254 "is non-empty\n",
1255 txq->txq_headidx);
1256 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1257 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1258 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
e8324357 1259 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
c46917bb
LR
1260 ath_print(common, ATH_DBG_XMIT,
1261 "TXDP[%u] = %llx (%p)\n",
1262 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
e8324357 1263 } else {
e5003249
VT
1264 list_splice_tail_init(head, &txq->axq_q);
1265
1266 if (txq->axq_link == NULL) {
1267 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1268 ath_print(common, ATH_DBG_XMIT,
1269 "TXDP[%u] = %llx (%p)\n",
1270 txq->axq_qnum, ito64(bf->bf_daddr),
1271 bf->bf_desc);
1272 } else {
1273 *txq->axq_link = bf->bf_daddr;
1274 ath_print(common, ATH_DBG_XMIT,
1275 "link[%u] (%p)=%llx (%p)\n",
1276 txq->axq_qnum, txq->axq_link,
1277 ito64(bf->bf_daddr), bf->bf_desc);
1278 }
1279 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1280 &txq->axq_link);
1281 ath9k_hw_txstart(ah, txq->axq_qnum);
e8324357 1282 }
e5003249 1283 txq->axq_depth++;
e8324357 1284}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
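
/*
 * Example (illustrative, not from the original file): the sequence number
 * occupies the upper 12 bits of seq_ctrl (IEEE80211_SEQ_SEQ_SHIFT is 4),
 * so seq_next = 100 yields seq_ctrl = cpu_to_le16(100 << 4) = 0x0640, with
 * the low 4 fragment bits left at zero.
 */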

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
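
/*
 * Worked example (illustrative, not from the original file): a 1500-byte
 * frame at HT20 MCS 7 (one stream, long GI) gives nbits = 1500 * 8 + 22 =
 * 12022, nsymbits = 260, nsymbols = ceil(12022 / 260) = 47, so the payload
 * lasts SYMBOL_TIME(47) = 188 us; adding L_STF + L_LTF + L_SIG + HT_SIG +
 * HT_STF + HT_LTF(1) = 36 us of training/signal fields yields 224 us.
 */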

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_tx_aborted = false;

	return 0;
}

1648/* FIXME: tx power */
1649static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
528f0c6b
S
1650 struct ath_tx_control *txctl)
1651{
a22be22a 1652 struct sk_buff *skb = bf->bf_mpdu;
528f0c6b 1653 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c37452b0 1654 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528f0c6b
S
1655 struct ath_node *an = NULL;
1656 struct list_head bf_head;
1657 struct ath_desc *ds;
1658 struct ath_atx_tid *tid;
cbe61d8a 1659 struct ath_hw *ah = sc->sc_ah;
528f0c6b 1660 int frm_type;
c37452b0 1661 __le16 fc;
528f0c6b 1662
528f0c6b 1663 frm_type = get_hw_packet_type(skb);
c37452b0 1664 fc = hdr->frame_control;
528f0c6b
S
1665
1666 INIT_LIST_HEAD(&bf_head);
1667 list_add_tail(&bf->list, &bf_head);
f078f209 1668
f078f209 1669 ds = bf->bf_desc;
87d5efbb 1670 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1671
528f0c6b
S
1672 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1673 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1674
1675 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1676 skb->len, /* segment length */
1677 true, /* first segment */
1678 true, /* last segment */
3f3a1c80 1679 ds, /* first descriptor */
cc610ac0
VT
1680 bf->bf_buf_addr,
1681 txctl->txq->axq_qnum);
f078f209 1682
9f42c2b6
FF
1683 if (bf->bf_state.bfs_paprd)
1684 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1685
528f0c6b 1686 spin_lock_bh(&txctl->txq->axq_lock);
f078f209 1687
f1617967
JL
1688 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1689 tx_info->control.sta) {
1690 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1691 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1692
c37452b0
S
1693 if (!ieee80211_is_data_qos(fc)) {
1694 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1695 goto tx_done;
1696 }
1697
066dae93 1698 WARN_ON(tid->ac->txq != txctl->txq);
4fdec031 1699 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
f078f209
LR
1700 /*
1701 * Try aggregation if it's a unicast data frame
1702 * and the destination is HT capable.
1703 */
528f0c6b 1704 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
f078f209
LR
1705 } else {
1706 /*
528f0c6b
S
1707 * Send this frame as a regular frame when the
1708 * ADDBA exchange is neither complete nor pending.
f078f209 1709 */
c37452b0
S
1710 ath_tx_send_ht_normal(sc, txctl->txq,
1711 tid, &bf_head);
f078f209
LR
1712 }
1713 } else {
c37452b0 1714 ath_tx_send_normal(sc, txctl->txq, &bf_head);
f078f209 1715 }
528f0c6b 1716
c37452b0 1717tx_done:
528f0c6b 1718 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1719}
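/*
 * Editor's summary (no new behavior, just the decision tree that
 * ath_tx_start_dma() implements above):
 *
 *   HT frame, TX aggregation enabled, station known?
 *     no                                  -> ath_tx_send_normal()
 *     yes, but not QoS data               -> ath_tx_send_normal()
 *     yes, IEEE80211_TX_CTL_AMPDU set     -> ath_tx_send_ampdu()
 *     yes, ADDBA neither done nor pending -> ath_tx_send_ht_normal()
 */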
1720
f8316df1 1721/* Upon failure caller should free skb */
c52f33d0 1722int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1723 struct ath_tx_control *txctl)
f078f209 1724{
c52f33d0
JM
1725 struct ath_wiphy *aphy = hw->priv;
1726 struct ath_softc *sc = aphy->sc;
c46917bb 1727 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
84642d6b 1728 struct ath_txq *txq = txctl->txq;
528f0c6b 1729 struct ath_buf *bf;
97923b14 1730 int q, r;
f078f209 1731
528f0c6b
S
1732 bf = ath_tx_get_buffer(sc);
1733 if (!bf) {
c46917bb 1734 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
528f0c6b
S
1735 return -1;
1736 }
1737
066dae93 1738 q = skb_get_queue_mapping(skb);
c52f33d0 1739 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
f8316df1 1740 if (unlikely(r)) {
c46917bb 1741 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
c112d0c5
LR
1742
1743 /* upon ath_tx_processq() this TX queue will be resumed, we
1744 * guarantee this will happen by knowing beforehand that
1745 * we will at least have to run TX completion on one buffer
1746 * on the queue */
1747 spin_lock_bh(&txq->axq_lock);
066dae93
FF
1748 if (txq == sc->tx.txq_map[q] && !txq->stopped &&
1749 txq->axq_depth > 1) {
1750 ath_mac80211_stop_queue(sc, q);
c112d0c5
LR
1751 txq->stopped = 1;
1752 }
1753 spin_unlock_bh(&txq->axq_lock);
1754
0a8cea84 1755 ath_tx_return_buffer(sc, bf);
c112d0c5 1756
f8316df1
LR
1757 return r;
1758 }
1759
97923b14 1760 spin_lock_bh(&txq->axq_lock);
066dae93
FF
1761 if (txq == sc->tx.txq_map[q] &&
1762 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1763 ath_mac80211_stop_queue(sc, q);
97923b14
FF
1764 txq->stopped = 1;
1765 }
1766 spin_unlock_bh(&txq->axq_lock);
1767
8f93b8b3 1768 ath_tx_start_dma(sc, bf, txctl);
f078f209 1769
528f0c6b 1770 return 0;
f078f209
LR
1771}
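/*
 * Editor's sketch of the flow control used above: the mac80211 queue
 * mapped to this hardware queue is stopped once the driver-side
 * backlog exceeds ATH_MAX_QDEPTH and restarted from the completion
 * path (ath_wake_mac80211_queue()) once it drains back below that
 * mark. Hypothetical helper, assuming axq_lock is held:
 */
static bool ath_txq_should_stop_sketch(struct ath_txq *txq)
{
	return !txq->stopped && txq->pending_frames > ATH_MAX_QDEPTH;
}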
1772
c52f33d0 1773void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
f078f209 1774{
c52f33d0
JM
1775 struct ath_wiphy *aphy = hw->priv;
1776 struct ath_softc *sc = aphy->sc;
c46917bb 1777 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3
BP
1778 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1779 int padpos, padsize;
e8324357
S
1780 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1781 struct ath_tx_control txctl;
f078f209 1782
e8324357 1783 memset(&txctl, 0, sizeof(struct ath_tx_control));
f078f209
LR
1784
1785 /*
e8324357
S
1786 * As a temporary workaround, assign seq# here; this will likely need
1787 * to be cleaned up to work better with Beacon transmission and virtual
1788 * BSSes.
f078f209 1789 */
e8324357 1790 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1791 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1792 sc->tx.seq_no += 0x10;
1793 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1794 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1795 }
f078f209 1796
e8324357 1797 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1798 padpos = ath9k_cmn_padpos(hdr->frame_control);
1799 padsize = padpos & 3;
1800 if (padsize && skb->len > padpos) {
e8324357 1801 if (skb_headroom(skb) < padsize) {
c46917bb
LR
1802 ath_print(common, ATH_DBG_XMIT,
1803 "TX CABQ padding failed\n");
e8324357
S
1804 dev_kfree_skb_any(skb);
1805 return;
1806 }
1807 skb_push(skb, padsize);
4d91f9f3 1808 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1809 }
f078f209 1810
e8324357 1811 txctl.txq = sc->beacon.cabq;
f078f209 1812
c46917bb
LR
1813 ath_print(common, ATH_DBG_XMIT,
1814 "transmitting CABQ packet, skb: %p\n", skb);
f078f209 1815
c52f33d0 1816 if (ath_tx_start(hw, skb, &txctl) != 0) {
c46917bb 1817 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
e8324357 1818 goto exit;
f078f209 1819 }
f078f209 1820
e8324357
S
1821 return;
1822exit:
1823 dev_kfree_skb_any(skb);
f078f209
LR
1824}
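/*
 * Editor's note on the seq# workaround above: the 16-bit seq_ctrl
 * field packs a 4-bit fragment number in bits 0-3 and a 12-bit
 * sequence number in bits 4-15, so adding 0x10 advances the sequence
 * number by one, while masking with IEEE80211_SCTL_FRAG preserves the
 * fragment bits before the new sequence number is OR'ed in.
 */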
1825
e8324357
S
1826/*****************/
1827/* TX Completion */
1828/*****************/
528f0c6b 1829
e8324357 1830static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
066dae93
FF
1831 struct ath_wiphy *aphy, int tx_flags,
1832 struct ath_txq *txq)
528f0c6b 1833{
e8324357
S
1834 struct ieee80211_hw *hw = sc->hw;
1835 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1836 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1837 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1838 int q, padpos, padsize;
528f0c6b 1839
c46917bb 1840 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1841
827e69bf
FF
1842 if (aphy)
1843 hw = aphy->hw;
528f0c6b 1844
6b2c4032 1845 if (tx_flags & ATH_TX_BAR)
e8324357 1846 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1847
6b2c4032 1848 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1849 /* Frame was ACKed */
1850 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1851 }
1852
4d91f9f3
BP
1853 padpos = ath9k_cmn_padpos(hdr->frame_control);
1854 padsize = padpos & 3;
1855 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1856 /*
1857 * Remove MAC header padding before giving the frame back to
1858 * mac80211.
1859 */
4d91f9f3 1860 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1861 skb_pull(skb, padsize);
1862 }
528f0c6b 1863
1b04b930
S
1864 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1865 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
c46917bb
LR
1866 ath_print(common, ATH_DBG_PS,
1867 "Going back to sleep after having "
f643e51d 1868 "received TX status (0x%lx)\n",
1b04b930
S
1869 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1870 PS_WAIT_FOR_CAB |
1871 PS_WAIT_FOR_PSPOLL_DATA |
1872 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1873 }
1874
827e69bf 1875 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
f0ed85c6 1876 ath9k_tx_status(hw, skb);
97923b14
FF
1877 else {
1878 q = skb_get_queue_mapping(skb);
066dae93
FF
1879 if (txq == sc->tx.txq_map[q]) {
1880 spin_lock_bh(&txq->axq_lock);
1881 if (WARN_ON(--txq->pending_frames < 0))
1882 txq->pending_frames = 0;
1883 spin_unlock_bh(&txq->axq_lock);
1884 }
97923b14 1885
827e69bf 1886 ieee80211_tx_status(hw, skb);
97923b14 1887 }
e8324357 1888}
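/*
 * Editor's illustration of the un-padding in ath_tx_complete():
 *
 *   before: [ 802.11 header (padpos) | pad (padsize) | payload ]
 *   after:  [ 802.11 header | payload ]
 *
 * memmove() shifts the header up against the payload, overwriting the
 * pad bytes, and skb_pull() then trims the stale bytes off the front
 * so mac80211 never sees the driver's alignment padding.
 */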
f078f209 1889
e8324357 1890static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1891 struct ath_txq *txq, struct list_head *bf_q,
1892 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1893{
e8324357 1894 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1895 unsigned long flags;
6b2c4032 1896 int tx_flags = 0;
f078f209 1897
e8324357 1898 if (sendbar)
6b2c4032 1899 tx_flags = ATH_TX_BAR;
f078f209 1900
e8324357 1901 if (!txok) {
6b2c4032 1902 tx_flags |= ATH_TX_ERROR;
f078f209 1903
e8324357 1904 if (bf_isxretried(bf))
6b2c4032 1905 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1906 }
1907
c1739eb3 1908 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1909 bf->bf_buf_addr = 0;
9f42c2b6
FF
1910
1911 if (bf->bf_state.bfs_paprd) {
ca369eb4
VT
1912 if (time_after(jiffies,
1913 bf->bf_state.bfs_paprd_timestamp +
78a18172 1914 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 1915 dev_kfree_skb_any(skb);
78a18172 1916 else
ca369eb4 1917 complete(&sc->paprd_complete);
9f42c2b6 1918 } else {
066dae93
FF
1919 ath_debug_stat_tx(sc, bf, ts);
1920 ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
9f42c2b6 1921 }
6cf9e995
BG
1922 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
1923 * accidentally reference it later.
1924 */
1925 bf->bf_mpdu = NULL;
e8324357
S
1926
1927 /*
1928 * Return the list of ath_bufs of this mpdu to the free queue
1929 */
1930 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1931 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1932 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1933}
1934
e8324357 1935static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 1936 struct ath_tx_status *ts, int txok)
f078f209 1937{
e8324357
S
1938 u16 seq_st = 0;
1939 u32 ba[WME_BA_BMP_SIZE >> 5];
1940 int ba_index;
1941 int nbad = 0;
1942 int isaggr = 0;
f078f209 1943
7c9fd60f 1944 if (bf->bf_lastbf->bf_tx_aborted)
e8324357 1945 return 0;
f078f209 1946
e8324357
S
1947 isaggr = bf_isaggr(bf);
1948 if (isaggr) {
db1a052b
FF
1949 seq_st = ts->ts_seqnum;
1950 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 1951 }
f078f209 1952
e8324357
S
1953 while (bf) {
1954 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1955 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1956 nbad++;
1957
1958 bf = bf->bf_next;
1959 }
f078f209 1960
e8324357
S
1961 return nbad;
1962}
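/*
 * Editor's note: for an aggregate, ts_seqnum is the start of the
 * block-ack window and ba[] is the ACK bitmap reported by hardware.
 * The loop above counts a subframe as bad when the transmit failed
 * outright or when its bit (located via ATH_BA_INDEX() relative to
 * seq_st) is clear in that bitmap; the count feeds rate control.
 */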
f078f209 1963
db1a052b 1964static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
8a92e2ee 1965 int nbad, int txok, bool update_rc)
f078f209 1966{
a22be22a 1967 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 1968 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 1969 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
827e69bf 1970 struct ieee80211_hw *hw = bf->aphy->hw;
f0c255a0
FF
1971 struct ath_softc *sc = bf->aphy->sc;
1972 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 1973 u8 i, tx_rateindex;
f078f209 1974
95e4acb7 1975 if (txok)
db1a052b 1976 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 1977
db1a052b 1978 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
1979 WARN_ON(tx_rateindex >= hw->max_rates);
1980
db1a052b 1981 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 1982 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ebd02287 1983 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
d969847c 1984 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 1985
ebd02287
BS
1986 BUG_ON(nbad > bf->bf_nframes);
1987
1988 tx_info->status.ampdu_len = bf->bf_nframes;
1989 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1990 }
1991
db1a052b 1992 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 1993 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
f0c255a0
FF
1994 /*
1995 * If an underrun error is seen, treat it as an excessive
1996 * retry only if the max frame trigger level has been reached
1997 * (2 KB for single stream, and 4 KB for dual stream).
1998 * Adjust the long retry as if the frame was tried
1999 * hw->max_rate_tries times to affect how rate control updates
2000 * PER for the failed rate.
2001 * In case of congestion on the bus, penalizing this type of
2002 * underrun should help the hardware actually transmit new
2003 * frames successfully by eventually preferring slower rates.
2004 * This itself should also alleviate congestion on the bus.
2005 */
2006 if (ieee80211_is_data(hdr->frame_control) &&
2007 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2008 ATH9K_TX_DELIM_UNDERRUN)) &&
2009 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
2010 tx_info->status.rates[tx_rateindex].count =
2011 hw->max_rate_tries;
f078f209 2012 }
8a92e2ee 2013
545750d3 2014 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2015 tx_info->status.rates[i].count = 0;
545750d3
FF
2016 tx_info->status.rates[i].idx = -1;
2017 }
8a92e2ee 2018
78c4653a 2019 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2020}
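/*
 * Editor's sketch (hypothetical helper mirroring the condition
 * above): an underrun is reported to rate control as an excessive
 * retry only for data frames, and only once the TX FIFO trigger
 * level has already been raised to its maximum.
 */
static bool ath_underrun_is_xretry_sketch(struct ath_hw *ah,
					  struct ieee80211_hdr *hdr,
					  struct ath_tx_status *ts)
{
	return ieee80211_is_data(hdr->frame_control) &&
	       (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				ATH9K_TX_DELIM_UNDERRUN)) &&
	       ah->tx_trig_level >= ah->caps.tx_triglevel_max;
}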
2021
066dae93 2022static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
059d806c 2023{
066dae93 2024 struct ath_txq *txq;
97923b14 2025
066dae93 2026 txq = sc->tx.txq_map[qnum];
059d806c 2027 spin_lock_bh(&txq->axq_lock);
066dae93 2028 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
68e8f2fa
VT
2029 if (ath_mac80211_start_queue(sc, qnum))
2030 txq->stopped = 0;
059d806c
S
2031 }
2032 spin_unlock_bh(&txq->axq_lock);
2033}
2034
e8324357 2035static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2036{
cbe61d8a 2037 struct ath_hw *ah = sc->sc_ah;
c46917bb 2038 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2039 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2040 struct list_head bf_head;
e8324357 2041 struct ath_desc *ds;
29bffa96 2042 struct ath_tx_status ts;
0934af23 2043 int txok;
e8324357 2044 int status;
066dae93 2045 int qnum;
f078f209 2046
c46917bb
LR
2047 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2048 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2049 txq->axq_link);
f078f209 2050
f078f209
LR
2051 for (;;) {
2052 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2053 if (list_empty(&txq->axq_q)) {
2054 txq->axq_link = NULL;
f078f209
LR
2055 spin_unlock_bh(&txq->axq_lock);
2056 break;
2057 }
f078f209
LR
2058 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2059
e8324357
S
2060 /*
2061 * There is a race condition where a BH gets scheduled
2062 * after software writes TxE and before the hardware re-loads
2063 * the last descriptor to get the newly chained one.
2064 * Software must keep the last DONE descriptor as a
2065 * holding descriptor - software does so by marking
2066 * it with the STALE flag.
2067 */
2068 bf_held = NULL;
a119cc49 2069 if (bf->bf_stale) {
e8324357
S
2070 bf_held = bf;
2071 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2072 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2073 break;
2074 } else {
2075 bf = list_entry(bf_held->list.next,
6ef9b13d 2076 struct ath_buf, list);
e8324357 2077 }
f078f209
LR
2078 }
2079
2080 lastbf = bf->bf_lastbf;
e8324357 2081 ds = lastbf->bf_desc;
f078f209 2082
29bffa96
FF
2083 memset(&ts, 0, sizeof(ts));
2084 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2085 if (status == -EINPROGRESS) {
f078f209 2086 spin_unlock_bh(&txq->axq_lock);
e8324357 2087 break;
f078f209 2088 }
f078f209 2089
e8324357
S
2090 /*
2091 * Remove the ath_bufs of the same transmit unit from txq,
2092 * but leave the last descriptor behind as the holding
2093 * descriptor for the hardware.
2094 */
a119cc49 2095 lastbf->bf_stale = true;
e8324357 2096 INIT_LIST_HEAD(&bf_head);
e8324357
S
2097 if (!list_is_singular(&lastbf->list))
2098 list_cut_position(&bf_head,
2099 &txq->axq_q, lastbf->list.prev);
f078f209 2100
e8324357 2101 txq->axq_depth--;
29bffa96 2102 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2103 txq->axq_tx_inprogress = false;
0a8cea84
FF
2104 if (bf_held)
2105 list_del(&bf_held->list);
e8324357 2106 spin_unlock_bh(&txq->axq_lock);
f078f209 2107
0a8cea84
FF
2108 if (bf_held)
2109 ath_tx_return_buffer(sc, bf_held);
f078f209 2110
e8324357
S
2111 if (!bf_isampdu(bf)) {
2112 /*
2113 * This frame is sent out as a single frame.
2114 * Use hardware retry status for this frame.
2115 */
29bffa96 2116 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2117 bf->bf_state.bf_type |= BUF_XRETRY;
ebd02287 2118 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
e8324357 2119 }
f078f209 2120
066dae93
FF
2121 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2122
e8324357 2123 if (bf_isampdu(bf))
29bffa96 2124 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
e8324357 2125 else
29bffa96 2126 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2127
066dae93
FF
2128 if (txq == sc->tx.txq_map[qnum])
2129 ath_wake_mac80211_queue(sc, qnum);
8469cdef 2130
059d806c 2131 spin_lock_bh(&txq->axq_lock);
e8324357
S
2132 if (sc->sc_flags & SC_OP_TXAGGR)
2133 ath_txq_schedule(sc, txq);
2134 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2135 }
2136}
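/*
 * Editor's illustration of the holding-descriptor scheme used in
 * ath_tx_processq():
 *
 *   axq_q: [ stale holder ] -> [ bf ... lastbf ] -> [ next chain ]
 *
 * The completed chain is cut off the queue with list_cut_position()
 * while lastbf is marked bf_stale and left on the queue, so the
 * hardware can still follow its link pointer; the previous holder,
 * if any, is only now returned via ath_tx_return_buffer().
 */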
2137
305fe47f 2138static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2139{
2140 struct ath_softc *sc = container_of(work, struct ath_softc,
2141 tx_complete_work.work);
2142 struct ath_txq *txq;
2143 int i;
2144 bool needreset = false;
2145
2146 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2147 if (ATH_TXQ_SETUP(sc, i)) {
2148 txq = &sc->tx.txq[i];
2149 spin_lock_bh(&txq->axq_lock);
2150 if (txq->axq_depth) {
2151 if (txq->axq_tx_inprogress) {
2152 needreset = true;
2153 spin_unlock_bh(&txq->axq_lock);
2154 break;
2155 } else {
2156 txq->axq_tx_inprogress = true;
2157 }
2158 }
2159 spin_unlock_bh(&txq->axq_lock);
2160 }
2161
2162 if (needreset) {
c46917bb
LR
2163 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2164 "tx hung, resetting the chip\n");
332c5566 2165 ath9k_ps_wakeup(sc);
fac6b6a0 2166 ath_reset(sc, true);
332c5566 2167 ath9k_ps_restore(sc);
164ace38
SB
2168 }
2169
42935eca 2170 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2171 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2172}
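/*
 * Editor's note: the poll above is a two-phase watchdog. The first
 * pass over a non-empty queue sets axq_tx_inprogress, and every
 * completed frame clears it again in the completion paths. If a
 * later pass finds the flag still set, nothing has completed for a
 * whole ATH_TX_COMPLETE_POLL_INT interval and the chip is reset.
 */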
2173
2174
f078f209 2175
e8324357 2176void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2177{
e8324357
S
2178 int i;
2179 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2180
e8324357 2181 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2182
e8324357
S
2183 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2184 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2185 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2186 }
2187}
2188
e5003249
VT
2189void ath_tx_edma_tasklet(struct ath_softc *sc)
2190{
2191 struct ath_tx_status txs;
2192 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2193 struct ath_hw *ah = sc->sc_ah;
2194 struct ath_txq *txq;
2195 struct ath_buf *bf, *lastbf;
2196 struct list_head bf_head;
2197 int status;
2198 int txok;
066dae93 2199 int qnum;
e5003249
VT
2200
2201 for (;;) {
2202 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2203 if (status == -EINPROGRESS)
2204 break;
2205 if (status == -EIO) {
2206 ath_print(common, ATH_DBG_XMIT,
2207 "Error processing tx status\n");
2208 break;
2209 }
2210
2211 /* Skip beacon completions */
2212 if (txs.qid == sc->beacon.beaconq)
2213 continue;
2214
2215 txq = &sc->tx.txq[txs.qid];
2216
2217 spin_lock_bh(&txq->axq_lock);
2218 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2219 spin_unlock_bh(&txq->axq_lock);
2220 return;
2221 }
2222
2223 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2224 struct ath_buf, list);
2225 lastbf = bf->bf_lastbf;
2226
2227 INIT_LIST_HEAD(&bf_head);
2228 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2229 &lastbf->list);
2230 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2231 txq->axq_depth--;
2232 txq->axq_tx_inprogress = false;
2233 spin_unlock_bh(&txq->axq_lock);
2234
2235 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2236
2237 if (!bf_isampdu(bf)) {
e5003249
VT
2238 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2239 bf->bf_state.bf_type |= BUF_XRETRY;
ebd02287 2240 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
e5003249
VT
2241 }
2242
066dae93
FF
2243 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2244
e5003249
VT
2245 if (bf_isampdu(bf))
2246 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2247 else
2248 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2249 &txs, txok, 0);
2250
066dae93
FF
2251 if (txq == sc->tx.txq_map[qnum])
2252 ath_wake_mac80211_queue(sc, qnum);
7f9f3600 2253
e5003249
VT
2254 spin_lock_bh(&txq->axq_lock);
2255 if (!list_empty(&txq->txq_fifo_pending)) {
2256 INIT_LIST_HEAD(&bf_head);
2257 bf = list_first_entry(&txq->txq_fifo_pending,
2258 struct ath_buf, list);
2259 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2260 &bf->bf_lastbf->list);
2261 ath_tx_txqaddbuf(sc, txq, &bf_head);
2262 } else if (sc->sc_flags & SC_OP_TXAGGR)
2263 ath_txq_schedule(sc, txq);
2264 spin_unlock_bh(&txq->axq_lock);
2265 }
2266}
2267
e8324357
S
2268/*****************/
2269/* Init, Cleanup */
2270/*****************/
f078f209 2271
5088c2f1
VT
2272static int ath_txstatus_setup(struct ath_softc *sc, int size)
2273{
2274 struct ath_descdma *dd = &sc->txsdma;
2275 u8 txs_len = sc->sc_ah->caps.txs_len;
2276
2277 dd->dd_desc_len = size * txs_len;
2278 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2279 &dd->dd_desc_paddr, GFP_KERNEL);
2280 if (!dd->dd_desc)
2281 return -ENOMEM;
2282
2283 return 0;
2284}
2285
2286static int ath_tx_edma_init(struct ath_softc *sc)
2287{
2288 int err;
2289
2290 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2291 if (!err)
2292 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2293 sc->txsdma.dd_desc_paddr,
2294 ATH_TXSTATUS_RING_SIZE);
2295
2296 return err;
2297}
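/*
 * Editor's note on the EDMA setup above: the status ring is one
 * coherent DMA buffer of ATH_TXSTATUS_RING_SIZE entries, each
 * caps.txs_len bytes long, into which the hardware writes TX status
 * words for ath_tx_edma_tasklet() to consume.
 */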
2298
2299static void ath_tx_edma_cleanup(struct ath_softc *sc)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302
2303 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2304 dd->dd_desc_paddr);
2305}
2306
e8324357 2307int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2308{
c46917bb 2309 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2310 int error = 0;
f078f209 2311
797fe5cb 2312 spin_lock_init(&sc->tx.txbuflock);
f078f209 2313
797fe5cb 2314 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2315 "tx", nbufs, 1, 1);
797fe5cb 2316 if (error != 0) {
c46917bb
LR
2317 ath_print(common, ATH_DBG_FATAL,
2318 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2319 goto err;
2320 }
f078f209 2321
797fe5cb 2322 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2323 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2324 if (error != 0) {
c46917bb
LR
2325 ath_print(common, ATH_DBG_FATAL,
2326 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2327 goto err;
2328 }
f078f209 2329
164ace38
SB
2330 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2331
5088c2f1
VT
2332 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2333 error = ath_tx_edma_init(sc);
2334 if (error)
2335 goto err;
2336 }
2337
797fe5cb 2338err:
e8324357
S
2339 if (error != 0)
2340 ath_tx_cleanup(sc);
f078f209 2341
e8324357 2342 return error;
f078f209
LR
2343}
2344
797fe5cb 2345void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2346{
2347 if (sc->beacon.bdma.dd_desc_len != 0)
2348 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2349
2350 if (sc->tx.txdma.dd_desc_len != 0)
2351 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2352
2353 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2354 ath_tx_edma_cleanup(sc);
e8324357 2355}
f078f209
LR
2356
2357void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2358{
c5170163
S
2359 struct ath_atx_tid *tid;
2360 struct ath_atx_ac *ac;
2361 int tidno, acno;
f078f209 2362
8ee5afbc 2363 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2364 tidno < WME_NUM_TID;
2365 tidno++, tid++) {
2366 tid->an = an;
2367 tid->tidno = tidno;
2368 tid->seq_start = tid->seq_next = 0;
2369 tid->baw_size = WME_MAX_BA;
2370 tid->baw_head = tid->baw_tail = 0;
2371 tid->sched = false;
e8324357 2372 tid->paused = false;
a37c2c79 2373 tid->state &= ~AGGR_CLEANUP;
c5170163 2374 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2375 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2376 tid->ac = &an->ac[acno];
a37c2c79
S
2377 tid->state &= ~AGGR_ADDBA_COMPLETE;
2378 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2379 }
f078f209 2380
8ee5afbc 2381 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2382 acno < WME_NUM_AC; acno++, ac++) {
2383 ac->sched = false;
066dae93 2384 ac->txq = sc->tx.txq_map[acno];
c5170163 2385 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2386 }
2387}
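/*
 * Editor's note: per-node state is WME_NUM_TID TIDs, each mapped to
 * one of WME_NUM_AC access categories via TID_TO_WME_AC(), and each
 * AC is pinned to its hardware queue through sc->tx.txq_map[acno],
 * the same queue that ath_tx_start_dma() checks against txctl->txq.
 */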
2388
b5aa9bf9 2389void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2390{
2b40994c
FF
2391 struct ath_atx_ac *ac;
2392 struct ath_atx_tid *tid;
f078f209 2393 struct ath_txq *txq;
066dae93 2394 int tidno;
e8324357 2395
2b40994c
FF
2396 for (tidno = 0, tid = &an->tid[tidno];
2397 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2398
2b40994c 2399 ac = tid->ac;
066dae93 2400 txq = ac->txq;
f078f209 2401
2b40994c
FF
2402 spin_lock_bh(&txq->axq_lock);
2403
2404 if (tid->sched) {
2405 list_del(&tid->list);
2406 tid->sched = false;
2407 }
2408
2409 if (ac->sched) {
2410 list_del(&ac->list);
2411 tid->ac->sched = false;
f078f209 2412 }
2b40994c
FF
2413
2414 ath_tid_drain(sc, txq, tid);
2415 tid->state &= ~AGGR_ADDBA_COMPLETE;
2416 tid->state &= ~AGGR_CLEANUP;
2417
2418 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2419 }
2420}