/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME		16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{   26,   54 },	/* 0: BPSK */
	{   52,  108 },	/* 1: QPSK 1/2 */
	{   78,  162 },	/* 2: QPSK 3/4 */
	{  104,  216 },	/* 3: 16-QAM 1/2 */
	{  156,  324 },	/* 4: 16-QAM 3/4 */
	{  208,  432 },	/* 5: 64-QAM 2/3 */
	{  234,  486 },	/* 6: 64-QAM 3/4 */
	{  260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

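/*
 * Mark a TID as scheduled and add it to its access category's (AC) list,
 * then likewise add the AC to the hardware queue's list, unless the TID
 * is paused or either one is already scheduled.
 */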
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

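/*
 * Remove a completed frame from the block-ack window and slide the
 * window start (seq_start) past any leading slots that have drained.
 */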
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

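/*
 * Record a (non-retried) frame in the block-ack window, extending the
 * window tail if the frame lands beyond it.
 */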
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

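/*
 * Mark a frame as software-retried: bump the retry count and set the
 * IEEE 802.11 retry flag in the frame header.
 */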
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

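/*
 * Process the TX status of a completed aggregate: subframes covered by
 * the block-ack bitmap are completed as acked; un-acked subframes are
 * either software-retried (re-queued on the TID in their original
 * order) or, once the retry limit is hit, failed with an XRETRY status.
 */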
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issues happen. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

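/*
 * Compute the A-MPDU size limit for this frame from its rate series:
 * the smallest frame length with a 4ms transmit duration across the
 * configured rates, capped by the peer's advertised maximum A-MPDU
 * size. Returns 0 (no aggregation) for legacy and probe rates.
 */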
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

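/*
 * Pull frames off the TID's software queue and chain them into one
 * aggregate, stopping at the block-ack window edge, the rate-derived
 * length limit, or the subframe limit (half the negotiated BAW size).
 */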
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

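/*
 * Form and transmit aggregates from the TID's software queue until the
 * hardware queue is sufficiently deep or the block-ack window closes.
 * A single-frame "aggregate" is downgraded and sent as a normal frame.
 */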
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software retried frame always stays at the
			 * head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

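/*
 * Allocate and initialize a hardware TX queue of the given type and
 * subtype. Returns NULL if the hardware has no free queue of that type
 * or the returned queue number is out of range.
 */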
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

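/*
 * Stop DMA on the beacon queue and all data queues, resetting the
 * hardware if any queue fails to stop, then drain the pending frames
 * from every initialized data queue.
 */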
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

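/*
 * Round-robin TX scheduler: take the first access category queued on
 * this hardware queue, service one of its TIDs, and re-queue the AC at
 * the tail if it still has pending TIDs.
 */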
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

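/*
 * Queue an A-MPDU subframe: either buffer it in the TID's software
 * queue for later aggregation, or add it to the block-ack window and
 * send it to the hardware immediately as a single frame.
 */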
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

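/* Map the mac80211 hardware crypto algorithm to the ath9k key type. */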
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

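/*
 * Record the frame's TID and assign it the TID's next sequence number,
 * overriding the one set by mac80211, so that aggregation state and the
 * block-ack window stay consistent.
 */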
static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

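/*
 * Build the 4-entry hardware rate series for a frame from the mac80211
 * rate-control result, including RTS/CTS protection flags and per-rate
 * packet durations, and write it into the descriptor chain.
 */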
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		bf->bf_frmlen -= padsize;
	}

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}

1686/* FIXME: tx power */
1687static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
528f0c6b
S
1688 struct ath_tx_control *txctl)
1689{
a22be22a 1690 struct sk_buff *skb = bf->bf_mpdu;
528f0c6b 1691 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c37452b0 1692 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528f0c6b
S
1693 struct ath_node *an = NULL;
1694 struct list_head bf_head;
1695 struct ath_desc *ds;
1696 struct ath_atx_tid *tid;
cbe61d8a 1697 struct ath_hw *ah = sc->sc_ah;
528f0c6b 1698 int frm_type;
c37452b0 1699 __le16 fc;
528f0c6b 1700
528f0c6b 1701 frm_type = get_hw_packet_type(skb);
c37452b0 1702 fc = hdr->frame_control;
528f0c6b
S
1703
1704 INIT_LIST_HEAD(&bf_head);
1705 list_add_tail(&bf->list, &bf_head);
f078f209 1706
f078f209 1707 ds = bf->bf_desc;
87d5efbb 1708 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1709
528f0c6b
S
1710 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1711 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1712
1713 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1714 skb->len, /* segment length */
1715 true, /* first segment */
1716 true, /* last segment */
3f3a1c80 1717 ds, /* first descriptor */
cc610ac0
VT
1718 bf->bf_buf_addr,
1719 txctl->txq->axq_qnum);
f078f209 1720
9f42c2b6
FF
1721 if (bf->bf_state.bfs_paprd)
1722 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1723
528f0c6b 1724 spin_lock_bh(&txctl->txq->axq_lock);
f078f209 1725
f1617967
JL
1726 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1727 tx_info->control.sta) {
1728 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1729 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1730
c37452b0
S
1731 if (!ieee80211_is_data_qos(fc)) {
1732 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1733 goto tx_done;
1734 }
1735
4fdec031 1736 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
f078f209
LR
1737 /*
1738 * Try aggregation if it's a unicast data frame
1739 * and the destination is HT capable.
1740 */
528f0c6b 1741 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
f078f209
LR
1742 } else {
1743 /*
528f0c6b
S
 1744 * Send this frame as a regular frame when the ADDBA
 1745 * exchange is neither complete nor pending.
f078f209 1746 */
c37452b0
S
1747 ath_tx_send_ht_normal(sc, txctl->txq,
1748 tid, &bf_head);
f078f209
LR
1749 }
1750 } else {
c37452b0 1751 ath_tx_send_normal(sc, txctl->txq, &bf_head);
f078f209 1752 }
528f0c6b 1753
c37452b0 1754tx_done:
528f0c6b 1755 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1756}
1757
f8316df1 1758/* Upon failure caller should free skb */
c52f33d0 1759int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1760 struct ath_tx_control *txctl)
f078f209 1761{
c52f33d0
JM
1762 struct ath_wiphy *aphy = hw->priv;
1763 struct ath_softc *sc = aphy->sc;
c46917bb 1764 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
84642d6b 1765 struct ath_txq *txq = txctl->txq;
528f0c6b 1766 struct ath_buf *bf;
97923b14 1767 int q, r;
f078f209 1768
528f0c6b
S
1769 bf = ath_tx_get_buffer(sc);
1770 if (!bf) {
c46917bb 1771 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
528f0c6b
S
1772 return -1;
1773 }
1774
c52f33d0 1775 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
f8316df1 1776 if (unlikely(r)) {
c46917bb 1777 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
c112d0c5
LR
1778
 1779 /* upon ath_tx_processq() this TX queue will be resumed; we
 1780 * guarantee this will happen by knowing beforehand that
 1781 * we will at least have to run TX completion on one buffer
 1782 * on the queue */
1783 spin_lock_bh(&txq->axq_lock);
84642d6b 1784 if (!txq->stopped && txq->axq_depth > 1) {
f52de03b 1785 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
c112d0c5
LR
1786 txq->stopped = 1;
1787 }
1788 spin_unlock_bh(&txq->axq_lock);
1789
0a8cea84 1790 ath_tx_return_buffer(sc, bf);
c112d0c5 1791
f8316df1
LR
1792 return r;
1793 }
1794
97923b14
FF
1795 q = skb_get_queue_mapping(skb);
1796 if (q >= 4)
1797 q = 0;
1798
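/*
 * Annotation (illustrative): the clamp above assumes one
 * pending_frames counter per WME access category (four queues); any
 * out-of-range mapping is folded onto queue 0.
 */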
1799 spin_lock_bh(&txq->axq_lock);
1800 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1801 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1802 txq->stopped = 1;
1803 }
1804 spin_unlock_bh(&txq->axq_lock);
1805
8f93b8b3 1806 ath_tx_start_dma(sc, bf, txctl);
f078f209 1807
528f0c6b 1808 return 0;
f078f209
LR
1809}
1810
c52f33d0 1811void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
f078f209 1812{
c52f33d0
JM
1813 struct ath_wiphy *aphy = hw->priv;
1814 struct ath_softc *sc = aphy->sc;
c46917bb 1815 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3
BP
 1816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1817 int padpos, padsize;
e8324357
S
1818 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1819 struct ath_tx_control txctl;
f078f209 1820
e8324357 1821 memset(&txctl, 0, sizeof(struct ath_tx_control));
f078f209
LR
1822
1823 /*
e8324357
S
1824 * As a temporary workaround, assign seq# here; this will likely need
1825 * to be cleaned up to work better with Beacon transmission and virtual
1826 * BSSes.
f078f209 1827 */
e8324357 1828 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1829 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1830 sc->tx.seq_no += 0x10;
1831 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1832 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1833 }
f078f209 1834
e8324357 1835 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1836 padpos = ath9k_cmn_padpos(hdr->frame_control);
1837 padsize = padpos & 3;
 1838 if (padsize && skb->len > padpos) {
e8324357 1839 if (skb_headroom(skb) < padsize) {
c46917bb
LR
1840 ath_print(common, ATH_DBG_XMIT,
1841 "TX CABQ padding failed\n");
e8324357
S
1842 dev_kfree_skb_any(skb);
1843 return;
1844 }
1845 skb_push(skb, padsize);
4d91f9f3 1846 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1847 }
f078f209 1848
e8324357 1849 txctl.txq = sc->beacon.cabq;
f078f209 1850
c46917bb
LR
1851 ath_print(common, ATH_DBG_XMIT,
1852 "transmitting CABQ packet, skb: %p\n", skb);
f078f209 1853
c52f33d0 1854 if (ath_tx_start(hw, skb, &txctl) != 0) {
c46917bb 1855 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
e8324357 1856 goto exit;
f078f209 1857 }
f078f209 1858
e8324357
S
1859 return;
1860exit:
1861 dev_kfree_skb_any(skb);
f078f209
LR
1862}
1863
e8324357
S
1864/*****************/
1865/* TX Completion */
1866/*****************/
528f0c6b 1867
e8324357 1868static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
827e69bf 1869 struct ath_wiphy *aphy, int tx_flags)
528f0c6b 1870{
e8324357
S
1871 struct ieee80211_hw *hw = sc->hw;
1872 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1873 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1874 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1875 int q, padpos, padsize;
528f0c6b 1876
c46917bb 1877 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1878
827e69bf
FF
1879 if (aphy)
1880 hw = aphy->hw;
528f0c6b 1881
6b2c4032 1882 if (tx_flags & ATH_TX_BAR)
e8324357 1883 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1884
6b2c4032 1885 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1886 /* Frame was ACKed */
1887 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1888 }
1889
4d91f9f3
BP
1890 padpos = ath9k_cmn_padpos(hdr->frame_control);
1891 padsize = padpos & 3;
 1892 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1893 /*
1894 * Remove MAC header padding before giving the frame back to
1895 * mac80211.
1896 */
4d91f9f3 1897 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1898 skb_pull(skb, padsize);
1899 }
528f0c6b 1900
1b04b930
S
1901 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1902 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
c46917bb
LR
1903 ath_print(common, ATH_DBG_PS,
1904 "Going back to sleep after having "
f643e51d 1905 "received TX status (0x%lx)\n",
1b04b930
S
1906 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1907 PS_WAIT_FOR_CAB |
1908 PS_WAIT_FOR_PSPOLL_DATA |
1909 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1910 }
1911
827e69bf 1912 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
f0ed85c6 1913 ath9k_tx_status(hw, skb);
97923b14
FF
1914 else {
1915 q = skb_get_queue_mapping(skb);
1916 if (q >= 4)
1917 q = 0;
1918
1919 if (--sc->tx.pending_frames[q] < 0)
1920 sc->tx.pending_frames[q] = 0;
1921
827e69bf 1922 ieee80211_tx_status(hw, skb);
97923b14 1923 }
e8324357 1924}
f078f209 1925
e8324357 1926static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1927 struct ath_txq *txq, struct list_head *bf_q,
1928 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1929{
e8324357 1930 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1931 unsigned long flags;
6b2c4032 1932 int tx_flags = 0;
f078f209 1933
e8324357 1934 if (sendbar)
6b2c4032 1935 tx_flags = ATH_TX_BAR;
f078f209 1936
e8324357 1937 if (!txok) {
6b2c4032 1938 tx_flags |= ATH_TX_ERROR;
f078f209 1939
e8324357 1940 if (bf_isxretried(bf))
6b2c4032 1941 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1942 }
1943
e8324357 1944 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
9f42c2b6
FF
1945
1946 if (bf->bf_state.bfs_paprd) {
1947 sc->paprd_txok = txok;
1948 complete(&sc->paprd_complete);
1949 } else {
1950 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1951 ath_debug_stat_tx(sc, txq, bf, ts);
1952 }
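/*
 * Annotation (illustrative): PAPRD (PA pre-distortion) calibration
 * frames are driver-internal, so their completion wakes the waiting
 * calibration code via paprd_complete instead of being reported back
 * to mac80211.
 */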
e8324357
S
1953
1954 /*
1955 * Return the list of ath_buf of this mpdu to free queue
1956 */
1957 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1958 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1959 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1960}
1961
e8324357 1962static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 1963 struct ath_tx_status *ts, int txok)
f078f209 1964{
e8324357
S
1965 u16 seq_st = 0;
1966 u32 ba[WME_BA_BMP_SIZE >> 5];
1967 int ba_index;
1968 int nbad = 0;
1969 int isaggr = 0;
f078f209 1970
7c9fd60f 1971 if (bf->bf_lastbf->bf_tx_aborted)
e8324357 1972 return 0;
f078f209 1973
e8324357
S
1974 isaggr = bf_isaggr(bf);
1975 if (isaggr) {
db1a052b
FF
1976 seq_st = ts->ts_seqnum;
1977 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 1978 }
f078f209 1979
e8324357
S
1980 while (bf) {
1981 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1982 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1983 nbad++;
1984
1985 bf = bf->bf_next;
1986 }
f078f209 1987
e8324357
S
1988 return nbad;
1989}
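/*
 * Sketch of the bitmap test used above, assuming the usual ath9k macro
 * definitions (illustrative, not part of this file):
 *
 *	ATH_BA_INDEX(st, seq)	(((seq) - (st)) & (IEEE80211_SEQ_MAX - 1))
 *	ATH_BA_ISSET(bm, n)	((n) < WME_BA_BMP_SIZE &&
 *				 ((bm)[(n) >> 5] & (1 << ((n) & 31))))
 *
 * so a subframe is counted bad when txok is false, or when the bit at
 * its sequence-number offset from ts_seqnum is clear in the block-ack
 * bitmap copied out of the TX status.
 */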
f078f209 1990
db1a052b 1991static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
8a92e2ee 1992 int nbad, int txok, bool update_rc)
f078f209 1993{
a22be22a 1994 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 1995 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 1996 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
827e69bf 1997 struct ieee80211_hw *hw = bf->aphy->hw;
8a92e2ee 1998 u8 i, tx_rateindex;
f078f209 1999
95e4acb7 2000 if (txok)
db1a052b 2001 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2002
db1a052b 2003 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2004 WARN_ON(tx_rateindex >= hw->max_rates);
2005
db1a052b 2006 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 2007 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
d969847c
FF
2008 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2009 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2010
db1a052b 2011 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 2012 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
254ad0ff 2013 if (ieee80211_is_data(hdr->frame_control)) {
db1a052b 2014 if (ts->ts_flags &
827e69bf
FF
2015 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2016 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
db1a052b
FF
2017 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2018 (ts->ts_status & ATH9K_TXERR_FIFO))
827e69bf
FF
2019 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2020 tx_info->status.ampdu_len = bf->bf_nframes;
2021 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
e8324357 2022 }
f078f209 2023 }
8a92e2ee 2024
545750d3 2025 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2026 tx_info->status.rates[i].count = 0;
545750d3
FF
2027 tx_info->status.rates[i].idx = -1;
2028 }
8a92e2ee
VT
2029
2030 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
f078f209
LR
2031}
2032
059d806c
S
2033static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2034{
2035 int qnum;
2036
97923b14
FF
2037 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2038 if (qnum == -1)
2039 return;
2040
059d806c 2041 spin_lock_bh(&txq->axq_lock);
97923b14
FF
2042 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2043 ath_mac80211_start_queue(sc, qnum);
2044 txq->stopped = 0;
059d806c
S
2045 }
2046 spin_unlock_bh(&txq->axq_lock);
2047}
2048
e8324357 2049static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2050{
cbe61d8a 2051 struct ath_hw *ah = sc->sc_ah;
c46917bb 2052 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2053 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2054 struct list_head bf_head;
e8324357 2055 struct ath_desc *ds;
29bffa96 2056 struct ath_tx_status ts;
0934af23 2057 int txok;
e8324357 2058 int status;
f078f209 2059
c46917bb
LR
2060 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2061 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2062 txq->axq_link);
f078f209 2063
f078f209
LR
2064 for (;;) {
2065 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2066 if (list_empty(&txq->axq_q)) {
2067 txq->axq_link = NULL;
f078f209
LR
2068 spin_unlock_bh(&txq->axq_lock);
2069 break;
2070 }
f078f209
LR
2071 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2072
e8324357
S
2073 /*
 2074 * There is a race condition in which a BH gets scheduled
 2075 * after sw writes TxE and before hw re-loads the last
 2076 * descriptor to get the newly chained one.
2077 * Software must keep the last DONE descriptor as a
2078 * holding descriptor - software does so by marking
2079 * it with the STALE flag.
2080 */
2081 bf_held = NULL;
a119cc49 2082 if (bf->bf_stale) {
e8324357
S
2083 bf_held = bf;
2084 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2085 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2086 break;
2087 } else {
2088 bf = list_entry(bf_held->list.next,
6ef9b13d 2089 struct ath_buf, list);
e8324357 2090 }
f078f209
LR
2091 }
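/*
 * Annotation (illustrative): bf_held is the STALE holding descriptor
 * left behind by a previous completion; it is unlinked and returned to
 * the free list further below, and the frame to complete starts at the
 * buffer that follows it.
 */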
2092
2093 lastbf = bf->bf_lastbf;
e8324357 2094 ds = lastbf->bf_desc;
f078f209 2095
29bffa96
FF
2096 memset(&ts, 0, sizeof(ts));
2097 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2098 if (status == -EINPROGRESS) {
f078f209 2099 spin_unlock_bh(&txq->axq_lock);
e8324357 2100 break;
f078f209 2101 }
f078f209 2102
e7824a50
LR
2103 /*
2104 * We now know the nullfunc frame has been ACKed so we
2105 * can disable RX.
2106 */
2107 if (bf->bf_isnullfunc &&
29bffa96 2108 (ts.ts_status & ATH9K_TX_ACKED)) {
3f7c5c10
SB
2109 if ((sc->ps_flags & PS_ENABLED))
2110 ath9k_enable_ps(sc);
2111 else
1b04b930 2112 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
e7824a50
LR
2113 }
2114
e8324357
S
2115 /*
2116 * Remove ath_buf's of the same transmit unit from txq,
2117 * however leave the last descriptor back as the holding
2118 * descriptor for hw.
2119 */
a119cc49 2120 lastbf->bf_stale = true;
e8324357 2121 INIT_LIST_HEAD(&bf_head);
e8324357
S
2122 if (!list_is_singular(&lastbf->list))
2123 list_cut_position(&bf_head,
2124 &txq->axq_q, lastbf->list.prev);
f078f209 2125
e8324357 2126 txq->axq_depth--;
29bffa96 2127 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2128 txq->axq_tx_inprogress = false;
0a8cea84
FF
2129 if (bf_held)
2130 list_del(&bf_held->list);
e8324357 2131 spin_unlock_bh(&txq->axq_lock);
f078f209 2132
0a8cea84
FF
2133 if (bf_held)
2134 ath_tx_return_buffer(sc, bf_held);
f078f209 2135
e8324357
S
2136 if (!bf_isampdu(bf)) {
2137 /*
2138 * This frame is sent out as a single frame.
2139 * Use hardware retry status for this frame.
2140 */
29bffa96
FF
2141 bf->bf_retries = ts.ts_longretry;
2142 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2143 bf->bf_state.bf_type |= BUF_XRETRY;
29bffa96 2144 ath_tx_rc_status(bf, &ts, 0, txok, true);
e8324357 2145 }
f078f209 2146
e8324357 2147 if (bf_isampdu(bf))
29bffa96 2148 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
e8324357 2149 else
29bffa96 2150 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2151
059d806c 2152 ath_wake_mac80211_queue(sc, txq);
8469cdef 2153
059d806c 2154 spin_lock_bh(&txq->axq_lock);
e8324357
S
2155 if (sc->sc_flags & SC_OP_TXAGGR)
2156 ath_txq_schedule(sc, txq);
2157 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2158 }
2159}
2160
305fe47f 2161static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2162{
2163 struct ath_softc *sc = container_of(work, struct ath_softc,
2164 tx_complete_work.work);
2165 struct ath_txq *txq;
2166 int i;
2167 bool needreset = false;
2168
2169 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2170 if (ATH_TXQ_SETUP(sc, i)) {
2171 txq = &sc->tx.txq[i];
2172 spin_lock_bh(&txq->axq_lock);
2173 if (txq->axq_depth) {
2174 if (txq->axq_tx_inprogress) {
2175 needreset = true;
2176 spin_unlock_bh(&txq->axq_lock);
2177 break;
2178 } else {
2179 txq->axq_tx_inprogress = true;
2180 }
2181 }
2182 spin_unlock_bh(&txq->axq_lock);
2183 }
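/*
 * Annotation (illustrative): axq_tx_inprogress is set here and cleared
 * by the completion path (ath_tx_processq()/ath_tx_edma_tasklet()), so
 * finding it still set one ATH_TX_COMPLETE_POLL_INT later means no
 * completion has arrived and the queue is treated as hung.
 */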
2184
2185 if (needreset) {
c46917bb
LR
2186 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2187 "tx hung, resetting the chip\n");
332c5566 2188 ath9k_ps_wakeup(sc);
164ace38 2189 ath_reset(sc, false);
332c5566 2190 ath9k_ps_restore(sc);
164ace38
SB
2191 }
2192
42935eca 2193 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2194 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2195}
2196
2197
f078f209 2198
e8324357 2199void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2200{
e8324357
S
2201 int i;
2202 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2203
e8324357 2204 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2205
e8324357
S
2206 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2207 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2208 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2209 }
2210}
2211
e5003249
VT
2212void ath_tx_edma_tasklet(struct ath_softc *sc)
2213{
2214 struct ath_tx_status txs;
2215 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2216 struct ath_hw *ah = sc->sc_ah;
2217 struct ath_txq *txq;
2218 struct ath_buf *bf, *lastbf;
2219 struct list_head bf_head;
2220 int status;
2221 int txok;
2222
2223 for (;;) {
2224 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2225 if (status == -EINPROGRESS)
2226 break;
2227 if (status == -EIO) {
2228 ath_print(common, ATH_DBG_XMIT,
2229 "Error processing tx status\n");
2230 break;
2231 }
2232
2233 /* Skip beacon completions */
2234 if (txs.qid == sc->beacon.beaconq)
2235 continue;
2236
2237 txq = &sc->tx.txq[txs.qid];
2238
2239 spin_lock_bh(&txq->axq_lock);
2240 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2241 spin_unlock_bh(&txq->axq_lock);
2242 return;
2243 }
2244
2245 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2246 struct ath_buf, list);
2247 lastbf = bf->bf_lastbf;
2248
2249 INIT_LIST_HEAD(&bf_head);
2250 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2251 &lastbf->list);
2252 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2253 txq->axq_depth--;
2254 txq->axq_tx_inprogress = false;
2255 spin_unlock_bh(&txq->axq_lock);
2256
2257 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2258
de0f648d
VT
2259 /*
 2260 * Make sure the nullfunc frame is ACKed before configuring
 2261 * hw into PS mode.
2262 */
2263 if (bf->bf_isnullfunc && txok) {
2264 if ((sc->ps_flags & PS_ENABLED))
2265 ath9k_enable_ps(sc);
2266 else
2267 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2268 }
2269
e5003249
VT
2270 if (!bf_isampdu(bf)) {
2271 bf->bf_retries = txs.ts_longretry;
2272 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2273 bf->bf_state.bf_type |= BUF_XRETRY;
2274 ath_tx_rc_status(bf, &txs, 0, txok, true);
2275 }
2276
2277 if (bf_isampdu(bf))
2278 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2279 else
2280 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2281 &txs, txok, 0);
2282
7f9f3600
FF
2283 ath_wake_mac80211_queue(sc, txq);
2284
e5003249
VT
2285 spin_lock_bh(&txq->axq_lock);
2286 if (!list_empty(&txq->txq_fifo_pending)) {
2287 INIT_LIST_HEAD(&bf_head);
2288 bf = list_first_entry(&txq->txq_fifo_pending,
2289 struct ath_buf, list);
2290 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2291 &bf->bf_lastbf->list);
2292 ath_tx_txqaddbuf(sc, txq, &bf_head);
2293 } else if (sc->sc_flags & SC_OP_TXAGGR)
2294 ath_txq_schedule(sc, txq);
2295 spin_unlock_bh(&txq->axq_lock);
2296 }
2297}
2298
e8324357
S
2299/*****************/
2300/* Init, Cleanup */
2301/*****************/
f078f209 2302
5088c2f1
VT
2303static int ath_txstatus_setup(struct ath_softc *sc, int size)
2304{
2305 struct ath_descdma *dd = &sc->txsdma;
2306 u8 txs_len = sc->sc_ah->caps.txs_len;
2307
2308 dd->dd_desc_len = size * txs_len;
2309 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2310 &dd->dd_desc_paddr, GFP_KERNEL);
2311 if (!dd->dd_desc)
2312 return -ENOMEM;
2313
2314 return 0;
2315}
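/*
 * Annotation (illustrative): on EDMA (AR9003-family) hardware, TX
 * completions are reported through this DMA status ring of
 * size * txs_len bytes rather than in the frame descriptors
 * themselves; see ath_tx_edma_tasklet() for the consumer side.
 */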
2316
2317static int ath_tx_edma_init(struct ath_softc *sc)
2318{
2319 int err;
2320
2321 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2322 if (!err)
2323 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2324 sc->txsdma.dd_desc_paddr,
2325 ATH_TXSTATUS_RING_SIZE);
2326
2327 return err;
2328}
2329
2330static void ath_tx_edma_cleanup(struct ath_softc *sc)
2331{
2332 struct ath_descdma *dd = &sc->txsdma;
2333
2334 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2335 dd->dd_desc_paddr);
2336}
2337
e8324357 2338int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2339{
c46917bb 2340 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2341 int error = 0;
f078f209 2342
797fe5cb 2343 spin_lock_init(&sc->tx.txbuflock);
f078f209 2344
797fe5cb 2345 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2346 "tx", nbufs, 1, 1);
797fe5cb 2347 if (error != 0) {
c46917bb
LR
2348 ath_print(common, ATH_DBG_FATAL,
2349 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2350 goto err;
2351 }
f078f209 2352
797fe5cb 2353 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2354 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2355 if (error != 0) {
c46917bb
LR
2356 ath_print(common, ATH_DBG_FATAL,
2357 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2358 goto err;
2359 }
f078f209 2360
164ace38
SB
2361 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2362
5088c2f1
VT
2363 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2364 error = ath_tx_edma_init(sc);
2365 if (error)
2366 goto err;
2367 }
2368
797fe5cb 2369err:
e8324357
S
2370 if (error != 0)
2371 ath_tx_cleanup(sc);
f078f209 2372
e8324357 2373 return error;
f078f209
LR
2374}
2375
797fe5cb 2376void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2377{
2378 if (sc->beacon.bdma.dd_desc_len != 0)
2379 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2380
2381 if (sc->tx.txdma.dd_desc_len != 0)
2382 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2383
2384 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2385 ath_tx_edma_cleanup(sc);
e8324357 2386}
f078f209
LR
2387
2388void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2389{
c5170163
S
2390 struct ath_atx_tid *tid;
2391 struct ath_atx_ac *ac;
2392 int tidno, acno;
f078f209 2393
8ee5afbc 2394 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2395 tidno < WME_NUM_TID;
2396 tidno++, tid++) {
2397 tid->an = an;
2398 tid->tidno = tidno;
2399 tid->seq_start = tid->seq_next = 0;
2400 tid->baw_size = WME_MAX_BA;
2401 tid->baw_head = tid->baw_tail = 0;
2402 tid->sched = false;
e8324357 2403 tid->paused = false;
a37c2c79 2404 tid->state &= ~AGGR_CLEANUP;
c5170163 2405 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2406 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2407 tid->ac = &an->ac[acno];
a37c2c79
S
2408 tid->state &= ~AGGR_ADDBA_COMPLETE;
2409 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2410 }
f078f209 2411
8ee5afbc 2412 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2413 acno < WME_NUM_AC; acno++, ac++) {
2414 ac->sched = false;
1d2231e2 2415 ac->qnum = sc->tx.hwq_map[acno];
c5170163 2416 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2417 }
2418}
2419
b5aa9bf9 2420void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209
LR
2421{
2422 int i;
2423 struct ath_atx_ac *ac, *ac_tmp;
2424 struct ath_atx_tid *tid, *tid_tmp;
2425 struct ath_txq *txq;
e8324357 2426
f078f209
LR
2427 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2428 if (ATH_TXQ_SETUP(sc, i)) {
b77f483f 2429 txq = &sc->tx.txq[i];
f078f209 2430
a9f042cb 2431 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2432
2433 list_for_each_entry_safe(ac,
2434 ac_tmp, &txq->axq_acq, list) {
2435 tid = list_first_entry(&ac->tid_q,
2436 struct ath_atx_tid, list);
2437 if (tid && tid->an != an)
2438 continue;
2439 list_del(&ac->list);
2440 ac->sched = false;
2441
2442 list_for_each_entry_safe(tid,
2443 tid_tmp, &ac->tid_q, list) {
2444 list_del(&tid->list);
2445 tid->sched = false;
b5aa9bf9 2446 ath_tid_drain(sc, txq, tid);
a37c2c79 2447 tid->state &= ~AGGR_ADDBA_COMPLETE;
a37c2c79 2448 tid->state &= ~AGGR_CLEANUP;
f078f209
LR
2449 }
2450 }
2451
a9f042cb 2452 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2453 }
2454 }
2455}